code | code_codestyle | style_context | style_context_codestyle | label
---|---|---|---|---
stringlengths 82–53.2k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1
def cocktail_shaker_sort(unsorted: list) -> list:
    """
    Sort a list in place with the bidirectional cocktail shaker sort.

    >>> cocktail_shaker_sort([4, 5, 2, 1, 2])
    [1, 2, 2, 4, 5]
    >>> cocktail_shaker_sort([-4, 5, 0, 1, -2])
    [-4, -2, 0, 1, 5]
    """
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        # Backward pass: bubble the smallest remaining element towards the front.
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        # Forward pass: bubble the largest remaining element towards the back.
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        # No swaps in either direction means the list is already sorted.
        if not swapped:
            break
    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
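As a quick illustrative sanity check (not part of the original script; `random` is only used to generate throwaway input), the result can be compared against Python's built-in sort:

import random

data = [random.randint(-100, 100) for _ in range(50)]
assert cocktail_shaker_sort(list(data)) == sorted(data)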
| 551 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
"bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
"bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
"cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
"cl-tohoku/bert-base-japanese-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
),
"wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    """Configuration class for a BERT model; the defaults are close to `bert-base-uncased`."""

    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
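A brief usage sketch (the classes above mirror `transformers.BertConfig` and its ONNX config; values are illustrative):

config = BertConfig(hidden_size=512, num_hidden_layers=4)
print(config.model_type, config.hidden_size)  # bert 512

onnx_config = BertOnnxConfig(config)
print(dict(onnx_config.inputs))
# {'input_ids': {0: 'batch', 1: 'sequence'}, 'attention_mask': ..., 'token_type_ids': ...}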
| 391 | 0 |
import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")


DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""


class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        )
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
        )
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = (
            "\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n"
            "    def __init__(self, *args, **kwargs):\n        requires_backends(self, 'torch')\n"
        )
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = (
            "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
            "from ..utils import DummyObject, requires_backends\n\n\n"
            "CONSTANT = None\n\n\n"
            'def function(*args, **kwargs):\n    requires_backends(function, ["torch"])\n\n\n'
            'class FakeClass(metaclass=DummyObject):\n    _backends = ["torch"]\n\n'
            '    def __init__(self, *args, **kwargs):\n        requires_backends(self, ["torch"])\n'
        )
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 108 |
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """
    Interleave the characters of two strings; leftover characters of the
    longer string are appended at the end.

    >>> alternative_string_arrange("ABCD", "XY")
    'AXBYCD'
    >>> alternative_string_arrange("AB", "XYZ")
    'AXBYZ'
    """
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    abs_length = first_str_length if first_str_length > second_str_length else second_str_length
    output_list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)


if __name__ == "__main__":
    print(alternative_string_arrange("AB", "XYZ"), end=" ")
| 108 | 1 |
import csv

import tweepy

# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter and initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)

    # save most recent tweets
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)


if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets("FirePing32")
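A small follow-up sketch for reading the dump back; the file name is derived from the call above and is otherwise hypothetical:

import csv

with open("new_FirePing32_tweets.csv", newline="") as f:
    rows = list(csv.DictReader(f))  # the header row supplies the keys: id, created_at, text

print(len(rows), "tweets")
if rows:
    print(rows[0]["created_at"], rows[0]["text"][:60])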
| 340 |
import torch

from diffusers import DiffusionPipeline


class OneStepPipeline(DiffusionPipeline):
    """Minimal custom pipeline: one UNet/scheduler step, deterministic output."""

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        # sample gaussian noise shaped like a single model input
        noise = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1

        # one denoising step through the UNet and the scheduler
        unet_output = self.unet(noise, timestep).sample
        scheduler_output = self.scheduler.step(unet_output, timestep, noise).prev_sample

        # deterministic result for testing: the scheduler output cancels itself, leaving ones
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)

        return result
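A minimal usage sketch, assuming standard `diffusers` building blocks (`UNet2DModel`, `DDPMScheduler`); the sizes are illustrative:

from diffusers import DDPMScheduler, UNet2DModel

unet = UNet2DModel(sample_size=32, in_channels=3, out_channels=3)
scheduler = DDPMScheduler()

pipeline = OneStepPipeline(unet=unet, scheduler=scheduler)
output = pipeline()  # deterministic tensor of ones, shape (1, 3, 32, 32)
print(output.shape, output.unique())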
| 340 | 1 |
import json
import sys
import tempfile
import unittest
from pathlib import Path

import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir


sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402


SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")


class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()

            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")

    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 707 |

from typing import List

import datasets
from datasets.tasks import AudioClassification

from ..folder_based_builder import folder_based_builder


logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """Builder Config for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")


AUDIO_EXTENSIONS = [
    ".aiff",
    ".au",
    ".avr",
    ".caf",
    ".flac",
    ".htk",
    ".svx",
    ".mat4",
    ".mat5",
    ".mpc2k",
    ".ogg",
    ".paf",
    ".pvf",
    ".raw",
    ".rf64",
    ".sd2",
    ".sds",
    ".ircam",
    ".voc",
    ".w64",
    ".wav",
    ".nist",
    ".wavex",
    ".wve",
    ".xi",
    ".mp3",
    ".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
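A brief usage sketch; the `audiofolder` loader consumes this builder, and the directory layout below is illustrative:

from datasets import load_dataset

# Expects a layout like: /path/to/clips/<label_name>/<clip>.wav
dataset = load_dataset("audiofolder", data_dir="/path/to/clips")
print(dataset["train"][0]["audio"]["sampling_rate"])
print(dataset["train"].features["label"].names)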
| 452 | 0 |
"""Flax ControlNet model."""
from typing import Optional, Tuple, Union

import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict

from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)


@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray


class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv_in = nn.Conv(
            self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype)
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype
            )
            blocks.append(conv2)
        self.blocks = blocks

        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)

        embedding = self.conv_out(embedding)
        return embedding


@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)

    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]

    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in
        # https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards
        # breaking which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=self.conditioning_embedding_out_channels,
        )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]

        controlnet_block = nn.Conv(
            output_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )

            down_blocks.append(down_block)

            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel,
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            dtype=self.dtype,
        )

        self.controlnet_mid_block = nn.Conv(
            mid_block_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale: float = 1.0,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxControlNetOutput, Tuple]:
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
| 71 |
def check_cycle(graph: dict) -> bool:
    """Return True if the directed graph contains a cycle, else False."""
    # Keep track of all visited nodes
    visited: set[int] = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set[int] = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    """Recur over the neighbours; a neighbour found in the recursion stack is a back edge."""
    # Mark the current node as visited and add it to the recursion stack
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
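For illustration, a quick check on two small adjacency-list graphs (hypothetical inputs, not part of the original script):

cyclic = {0: [1], 1: [2], 2: [0]}   # 2 -> 0 closes a cycle
acyclic = {0: [1], 1: [2], 2: []}   # a simple chain

print(check_cycle(cyclic))   # True
print(check_cycle(acyclic))  # False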
| 587 | 0 |
"""Convert mLUKE checkpoint."""

import argparse
import json
import os
from collections import OrderedDict

import torch

from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken


@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor(
            [[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]]
        )

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            full_name = f"{language}:{entity_name}"
            new_mapping[full_name] = entity_id
    return new_mapping


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
| 712 |
"""BEiT model configuration."""

from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/beit-base-patch16-224-pt22k": (
        "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}


class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 686 | 0 |
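A brief usage sketch (the classes above mirror `transformers.BeitConfig` and its ONNX config; values are illustrative):

config = BeitConfig(image_size=384)
print(config.model_type, config.num_hidden_layers)  # beit 12

onnx_config = BeitOnnxConfig(config)
print(dict(onnx_config.inputs))
# {'pixel_values': {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}}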
from __future__ import annotations

from bisect import bisect_left
from functools import total_ordering
from heapq import merge


@total_ordering
class Stack(list):
    """A list ordered by its last element, so piles can be compared by their tops."""

    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    """
    Sort a list in place with patience sort.

    >>> patience_sort([1, 9, 5, 21, 17, 6])
    [1, 5, 6, 9, 17, 21]
    >>> patience_sort([])
    []
    """
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)

    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(patience_sort(unsorted))
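As an illustration of the piles the first phase builds (input chosen arbitrarily):

# For [5, 3, 4, 1, 2] the bisect phase produces the decreasing piles
# [5, 3, 1] and [4, 2]; merging their reversals yields the sorted result.
print(patience_sort([5, 3, 4, 1, 2]))  # [1, 2, 3, 4, 5]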
| 17 |
import fire

from utils import calculate_rouge, save_json


def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
| 17 | 1 |
import os
import socket
from contextlib import contextmanager

import torch

from ..commands.config.default import write_basic_config  # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version


if is_deepspeed_available():
    from deepspeed import DeepSpeedEngine

if is_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm


def is_compiled_module(module):
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Extract a model from its distributed containers."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model


def wait_for_everyone():
    """Introduces a blocking point in the script, making sure all processes have reached it."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save the data to disk; use in place of `torch.save()` in distributed settings."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """Add each keyword argument to `os.environ` (upper-cased) and remove it on exit."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    """Get a pretty name for `obj`, preferring its qualified name."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merge `source` into `destination`."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port: int = None) -> bool:
    """Check if a port is in use on `localhost`."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
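A short usage sketch for `patch_environment` (the values are illustrative):

import os

with patch_environment(master_port="29501", world_size=2):
    # keys are upper-cased inside the block
    print(os.environ["MASTER_PORT"], os.environ["WORLD_SIZE"])

print("MASTER_PORT" in os.environ)  # False after exit (assuming it was unset before)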
| 713 |
class CircularQueue:
    """Circular FIFO queue with a fixed capacity, backed by a list."""

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")

        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")

        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
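A short usage sketch of the queue (capacity 3, values illustrative):

queue = CircularQueue(3)
queue.enqueue("a").enqueue("b").enqueue("c")
print(len(queue))       # 3
print(queue.dequeue())  # a
queue.enqueue("d")      # the slot freed by the dequeue is reused
print(queue.first())    # b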
| 125 | 0 |
"""simple docstring"""
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
lowerCAmelCase : Optional[Any] = getLogger(__name__)
def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ = 8 , snake_case__ = 10_24 , snake_case__="val" , snake_case__=None , snake_case__=False , snake_case__="summarization" , snake_case__=None , snake_case__=1 , snake_case__ = None , snake_case__="" , **snake_case__ , ) -> Dict:
lowerCamelCase = str(snake_case__ )
assert local_rank is not None
torch.distributed.init_process_group(backend="""nccl""" , rank=snake_case__ )
lowerCamelCase = Path(snake_case__ )
lowerCamelCase = save_dir.joinpath(F'rank_{local_rank}_output.json' )
torch.cuda.set_device(snake_case__ )
lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained(snake_case__ ).cuda()
if fpaa:
lowerCamelCase = model.half()
# determine if we need to increase num_beams
use_task_specific_params(snake_case__ , snake_case__ ) # update config with task specific params
lowerCamelCase = generate_kwargs.pop("""num_beams""" , model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
lowerCamelCase = num_return_sequences
lowerCamelCase = AutoTokenizer.from_pretrained(snake_case__ )
logger.info(F'Inferred tokenizer type: {tokenizer.__class__}' ) # if this is wrong, check config.model_type.
if max_source_length is None:
lowerCamelCase = tokenizer.model_max_length
if prefix is None:
lowerCamelCase = prefix or getattr(model.config , """prefix""" , """""" ) or """"""
lowerCamelCase = SeqaSeqDataset(
snake_case__ , snake_case__ , snake_case__ , max_target_length=10_24 , type_path=snake_case__ , n_obs=snake_case__ , prefix=snake_case__ , **snake_case__ , )
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
lowerCamelCase = ds.make_sortish_sampler(snake_case__ , distributed=snake_case__ , add_extra_examples=snake_case__ , shuffle=snake_case__ )
lowerCamelCase = DataLoader(snake_case__ , sampler=snake_case__ , batch_size=snake_case__ , collate_fn=ds.collate_fn )
lowerCamelCase = []
for batch in tqdm(snake_case__ ):
lowerCamelCase = model.generate(
input_ids=batch["""input_ids"""].to(model.device ) , attention_mask=batch["""attention_mask"""].to(model.device ) , num_return_sequences=snake_case__ , num_beams=snake_case__ , **snake_case__ , )
lowerCamelCase = tokenizer.batch_decode(snake_case__ , skip_special_tokens=snake_case__ , clean_up_tokenization_spaces=snake_case__ )
lowerCamelCase = batch["""ids"""]
if num_return_sequences > 1:
lowerCamelCase = chunks(snake_case__ , snake_case__ ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(snake_case__ ):
results.append({"""pred""": pred, """id""": ids[i].item()} )
save_json(snake_case__ , snake_case__ )
return results, sampler.num_replicas
def a__ ( ) -> Tuple:
lowerCamelCase = argparse.ArgumentParser(
epilog="""Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate""" )
parser.add_argument("""--data_dir""" , type=snake_case__ , help="""like cnn_dm/test.source""" )
parser.add_argument(
"""--model_name""" , type=snake_case__ , help="""like facebook/bart-large-cnn,t5-base, etc.""" , default="""sshleifer/distilbart-xsum-12-3""" , )
parser.add_argument("""--save_dir""" , type=snake_case__ , help="""where to save""" , default="""tmp_gen""" )
parser.add_argument("""--max_source_length""" , type=snake_case__ , default=snake_case__ )
parser.add_argument(
"""--type_path""" , type=snake_case__ , default="""test""" , help="""which subset to evaluate typically train/val/test""" )
parser.add_argument("""--task""" , type=snake_case__ , default="""summarization""" , help="""used for task_specific_params + metrics""" )
parser.add_argument("""--bs""" , type=snake_case__ , default=8 , required=snake_case__ , help="""batch size""" )
parser.add_argument(
"""--local_rank""" , type=snake_case__ , default=-1 , required=snake_case__ , help="""should be passed by distributed.launch""" )
parser.add_argument(
"""--n_obs""" , type=snake_case__ , default=snake_case__ , required=snake_case__ , help="""How many observations. Defaults to all.""" )
parser.add_argument(
"""--num_return_sequences""" , type=snake_case__ , default=1 , required=snake_case__ , help="""How many sequences to return""" )
parser.add_argument(
"""--sync_timeout""" , type=snake_case__ , default=6_00 , required=snake_case__ , help="""How long should master process wait for other processes to finish.""" , )
parser.add_argument("""--src_lang""" , type=snake_case__ , default=snake_case__ , required=snake_case__ )
parser.add_argument("""--tgt_lang""" , type=snake_case__ , default=snake_case__ , required=snake_case__ )
parser.add_argument(
"""--prefix""" , type=snake_case__ , required=snake_case__ , default=snake_case__ , help="""will be added to the begininng of src examples""" )
parser.add_argument("""--fp16""" , action="""store_true""" )
parser.add_argument("""--debug""" , action="""store_true""" )
lowerCamelCase = time.time()
lowerCamelCase , lowerCamelCase = parser.parse_known_args()
lowerCamelCase = parse_numeric_n_bool_cl_kwargs(snake_case__ )
if generate_kwargs and args.local_rank <= 0:
print(F'parsed the following generate kwargs: {generate_kwargs}' )
lowerCamelCase = Path(args.save_dir + """_tmp""" )
Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) # this handles locking.
lowerCamelCase = list(json_save_dir.glob("""rank_*.json""" ) )
if intermediate_files:
raise ValueError(F'Found files at {json_save_dir} please move or remove them.' )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
lowerCamelCase = {}
if args.src_lang is not None:
lowerCamelCase = args.src_lang
if args.tgt_lang is not None:
lowerCamelCase = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=snake_case__ )
lowerCamelCase , lowerCamelCase = eval_data_dir(
args.data_dir , snake_case__ , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=snake_case__ , **snake_case__ , )
if args.local_rank <= 0:
lowerCamelCase = Path(args.save_dir )
save_dir.mkdir(exist_ok=snake_case__ )
lowerCamelCase = gather_results_from_each_node(snake_case__ , snake_case__ , args.sync_timeout )
lowerCamelCase = combine_partial_results(snake_case__ )
if args.num_return_sequences > 1:
lowerCamelCase = save_dir.joinpath("""pseudolabel_results.json""" )
print(F'Saving aggregated results at {save_path}, intermediate in {json_save_dir}/' )
save_json(snake_case__ , snake_case__ )
return
lowerCamelCase = Path(args.data_dir ).joinpath(args.type_path + """.target""" )
with open(snake_case__ ) as f:
lowerCamelCase = [x.rstrip() for x in f.readlines()][: len(snake_case__ )]
# Calculate metrics, save metrics, and save _generations.txt
lowerCamelCase = """translation""" in args.task
lowerCamelCase = calculate_bleu if calc_bleu else calculate_rouge
lowerCamelCase = """bleu""" if calc_bleu else """rouge"""
lowerCamelCase = score_fn(snake_case__ , snake_case__ )
lowerCamelCase = len(snake_case__ )
lowerCamelCase = time.time() - start_time
lowerCamelCase = round(runtime / metrics["""n_obs"""] , 4 )
lowerCamelCase = num_replicas
# TODO(@stas00): add whatever metadata to metrics
lowerCamelCase = save_dir.joinpath(F'{args.type_path}_{metric_name}.json' )
save_json(snake_case__ , snake_case__ , indent=snake_case__ )
print(snake_case__ )
write_txt_file(snake_case__ , save_dir.joinpath(F'{args.type_path}_generations.txt' ) )
if args.debug:
write_txt_file(snake_case__ , save_dir.joinpath(F'{args.type_path}.target' ) )
else:
shutil.rmtree(snake_case__ )
def a__ ( snake_case__ ) -> List:
lowerCamelCase = []
for partial_result in partial_results:
records.extend(snake_case__ )
lowerCamelCase = sorted(snake_case__ , key=lambda snake_case__ : x["id"] )
lowerCamelCase = [x["""pred"""] for x in records]
return preds
def a__ ( snake_case__ , snake_case__ , snake_case__ ) -> List[Dict[str, List]]:
lowerCamelCase = time.time()
logger.info("""waiting for all nodes to finish""" )
lowerCamelCase = None
while (time.time() - start_wait) < timeout:
lowerCamelCase = list(save_dir.glob("""rank_*.json""" ) )
if len(snake_case__ ) < num_replicas:
continue
try:
# make sure all json files are fully saved
lowerCamelCase = lmap(snake_case__ , snake_case__ )
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError("""Rank 0 gave up on waiting for other processes""" )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
| 543 |
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2000000) -> int:
    """
    Return the area of the grid whose rectangle count is closest to ``target``.
    An a x b grid contains triangle(a) * triangle(b) rectangles, where
    triangle(n) = n * (n + 1) / 2 is the n-th triangle number.
    """
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
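

# (Added note) sanity check for the counting identity used above: an a x b grid
# contains triangle(a) * triangle(b) rectangles, so a 3 x 2 grid gives
# 6 * 3 = 18 rectangles, matching the worked example in Project Euler problem 85.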
if __name__ == "__main__":
print(F"""{solution() = }""")
| 303 | 0 |
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused")
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
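
    # (Added note) zero-shot audio classification scores each free-form candidate
    # label against the audio with a CLAP-style joint audio/text embedding, so the
    # label set is chosen at inference time; the scores above are softmax-normalized
    # across the candidate labels and therefore sum to 1.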
| 273 |
'''simple docstring'''
encode_dict = {
'a': 'AAAAA',
'b': 'AAAAB',
'c': 'AAABA',
'd': 'AAABB',
'e': 'AABAA',
'f': 'AABAB',
'g': 'AABBA',
'h': 'AABBB',
'i': 'ABAAA',
'j': 'BBBAA',
'k': 'ABAAB',
'l': 'ABABA',
'm': 'ABABB',
'n': 'ABBAA',
'o': 'ABBAB',
'p': 'ABBBA',
'q': 'ABBBB',
'r': 'BAAAA',
's': 'BAAAB',
't': 'BAABA',
'u': 'BAABB',
'v': 'BBBAB',
'w': 'BABAA',
'x': 'BABAB',
'y': 'BABBA',
'z': 'BABBB',
' ': ' ',
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
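

# (Added example) round trip computed from the table above:
#   encode("hello") == "AABBBAABAAABABAABABAABBAB"   # AABBB AABAA ABABA ABABA ABBAB
#   decode("AABBBAABAAABABAABABAABBAB") == "hello"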
if __name__ == "__main__":
from doctest import testmod
testmod()
| 273 | 1 |
import numpy as np
SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]
class BifidCipher:
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the pair of 1-based (row, column) coordinates of a letter."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter at the given 1-based (row, column) coordinates."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        """Write each letter's coordinates in a 2 x n grid, then re-pair them row-wise."""
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        """Invert encode(): rebuild the flattened coordinate sequence, then read columns."""
        message = message.lower()
        message = message.replace(" ", "")

        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
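

# (Added sketch) minimal usage; encode/decode are inverses for a-z input without
# spaces, with "j" folded into "i" by the 5x5 Polybius square:
#   cipher = BifidCipher()
#   cipher.decode(cipher.encode("testmessage"))  # -> "testmessage"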
import os
def solution():
    """Find the maximum total from top to bottom of the triangle in triangle.txt."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")

    with open(triangle_path) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
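

# (Added illustration) the in-place DP on a tiny triangle:
#     3            3
#     7 4    ->    10  7
#     2 4 6        12 14 13   -> max(a[-1]) == 14, i.e. the path 3 -> 7 -> 4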
if __name__ == "__main__":
print(solution()) | 162 | 1 |
'''simple docstring'''
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """Return the 1-based line number of the base,exponent pair with the largest base**exponent."""
    largest: float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
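

# (Added note) comparing base**exp directly would build astronomically large
# integers; since log10 is monotonic, comparing exp * log10(base) preserves the
# ordering and costs O(1) per line.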
if __name__ == "__main__":
print(solution())
| 687 |
'''simple docstring'''
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 50003
PYTHON_CODE = 50002
@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_base_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            # fmt: off
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n",
             SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",",
             SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is",
             SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
            # fmt: on
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            # fmt: off
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n",
             SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",",
             SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is",
             SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
            # fmt: on
        )

        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 4, end)]
        self.assertListEqual(language_tokens, ["__java__", "__python__", "__en_XX__", "<mask>"])

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True),
            code,
        )

    def test_full_multi_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="multi", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            # fmt: off
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n",
             SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",",
             SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is",
             SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
            # fmt: on
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            # fmt: off
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n",
             SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",",
             SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is",
             SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
            # fmt: on
        )

        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 7, end)]
        self.assertListEqual(
            language_tokens,
            ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"],
        )

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True),
            code,
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartPythonEnIntegrationTest(unittest.TestCase):
    checkpoint_name = "uclanlp/plbart-python-en_XX"
    src_text = [
        "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
        "def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
    ]
    tgt_text = [
        "Returns the maximum value of a b c.",
        "Sums the values of a b c.",
    ]
    # fmt: off
    expected_src_tokens = [
        134, 5452, 33460, 33441, 33463, 33465, 33463, 33449, 988, 20, 33456, 19, 33456,
        771, 39, 4258, 889, 3318, 33441, 33463, 33465, 33463, 33449, 2471, 2, PYTHON_CODE,
    ]
    # fmt: on

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name, language_codes="base", src_lang="python", tgt_lang="en_XX")
        cls.pad_token_id = 1
        return cls

    def check_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"], 50001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"], 50002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"], 50003)

    def test_python_en_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_python_en_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(EN_CODE, self.tokenizer.all_special_ids)
        generated_ids = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_english = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_english)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_python_en_tokenizer_truncation(self):
        src_text = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20]
        self.assertIsInstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], PYTHON_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"]), [50004, 50001])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = PLBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist(), [2, PYTHON_CODE])
        self.assertEqual(batch.decoder_input_ids[1][0], EN_CODE)
        self.assertEqual(batch.decoder_input_ids[1][-1], 2)
        self.assertEqual(batch.labels[1][-2:].tolist(), [2, EN_CODE])

    @require_torch
    def test_python_en_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 26), batch.input_ids.shape)
        self.assertEqual((2, 26), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, PYTHON_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt")
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="java")

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[150, 242, 2, 50003]],
                "attention_mask": [[1, 1, 1, 1]],
                # java
                "forced_bos_token_id": 50001,
            },
        )
| 687 | 1 |
'''simple docstring'''
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self):
        return f"Node({self.data})"


class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self):
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self):
        return sum(1 for _ in self)

    def __repr__(self):
        return "->".join([str(item) for item in self])

    def __getitem__(self, index):
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index, data):
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data):
        self.insert_nth(len(self), data)

    def insert_head(self, data):
        self.insert_nth(0, data)

    def insert_nth(self, index, data):
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self):  # print every node data
        print(self)

    def delete_head(self):
        return self.delete_nth(0)

    def delete_tail(self):  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index=0):
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self):
        return self.head is None

    def reverse(self):
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
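
    # (Added note) reverse() is an in-place O(n) pointer flip: each iteration
    # redirects one `next` pointer, so 1->2->3 becomes 3->2->1 with no new nodes.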
def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)

    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main():
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")
if __name__ == "__main__":
main()
| 672 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import M2M100Tokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
    from transformers.models.m2m_100.tokenization_m2m_100 import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 128022
FR_CODE = 128028
@require_sentencepiece
class M2M100TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = M2M100Tokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = M2M100Tokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return M2M100Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))

    @unittest.skip("Skip this test while all models are still to be uploaded.")
    def test_pretrained_model_lists(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [2, 3, 4, 5, 6])

        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, "This is a test")
@slow
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
snake_case : Tuple = {'''input_ids''': [[12_8022, 11_0108, 397, 11, 3_8272, 2247, 12_4811, 285, 1_8105, 1586, 207, 7, 3_9534, 4428, 397, 1019, 1_8105, 1586, 207, 7, 4_1337, 1_6786, 241, 7, 2_0214, 17, 12_5690, 1_0398, 7, 4_4378, 5_8069, 6_8342, 7798, 7343, 11, 299, 3_3310, 4, 158, 3_7350, 9_4077, 4569, 299, 3_3310, 90, 4, 5_2840, 290, 4, 3_1270, 112, 299, 682, 4, 5_2840, 3_9953, 1_4079, 193, 5_2519, 9_0894, 1_7894, 12_0697, 11, 4_0445, 551, 17, 1019, 5_2519, 9_0894, 1_7756, 963, 11, 4_0445, 480, 17, 9792, 1120, 5173, 1393, 6240, 1_6786, 241, 12_0996, 28, 1245, 1393, 11_8240, 1_1123, 1019, 9_3612, 2691, 1_0618, 9_8058, 12_0409, 1928, 279, 4, 4_0683, 367, 178, 207, 1019, 103, 10_3121, 506, 6_5296, 5, 2], [12_8022, 2_1217, 367, 117, 12_5450, 128, 719, 7, 7308, 40, 9_3612, 1_2669, 1116, 1_6704, 71, 1_7785, 3699, 1_5592, 35, 144, 9584, 241, 1_1943, 713, 950, 799, 2247, 8_8427, 150, 149, 11_8813, 12_0706, 1019, 10_6906, 8_1518, 28, 1224, 2_2799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_8022, 1658, 12_3311, 5155, 5578, 4722, 279, 1_4947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__ , model_name='''facebook/m2m100_418M''' , revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' , )
@require_torch
@require_sentencepiece
@require_tokenizers
class M2M100TokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    # fmt: on

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: M2M100Tokenizer = M2M100Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr")
        cls.pad_token_id = 1
        return cls

    def check_language_codes(self):
        self.assertEqual(self.tokenizer.get_lang_id("ar"), 128006)
        self.assertEqual(self.tokenizer.get_lang_id("en"), 128022)
        self.assertEqual(self.tokenizer.get_lang_id("ro"), 128076)
        self.assertEqual(self.tokenizer.get_lang_id("mr"), 128063)

    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)

    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_lang_token_ids = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = M2M100Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_lang_token_ids)

    @require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"

        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")

        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id)

        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]

    @require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    @require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[128022, 58, 4183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 128006,
            },
        )
| 638 | 0 |
import datasets
_CITATION = '''\
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}
'''
_DESCRIPTION = '''\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into a 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
'''
_KWARGS_DESCRIPTION = '''
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
\'accuracy\': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric("xnli")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
'''
def simple_accuracy(preds, labels):
    return (preds == labels).mean()


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
| 716 |
import sys
import turtle
def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    """Return the midpoint of the segment p1-p2."""
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    """Draw one triangle, then recurse on its three corner sub-triangles."""
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'''Correct format for using this script: '''
'''python fractals.py <int:depth_for_fractal>'''
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('''red''')
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
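
    # (Added note) each recursion level triples the number of sub-triangles, so a
    # depth-d call draws on the order of 3**d small triangles; depths much above
    # 7 tend to become slow with the turtle renderer.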
| 501 | 0 |
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """
    Holds the mean and standard deviation of the CLIP image embedder used in
    stable unCLIP, and scales/unscales image embeddings accordingly.
    """

    @register_to_config
    def __init__(
        self,
        embedding_dim: int = 768,
    ):
        super().__init__()

        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
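

# (Added sketch) minimal usage, assuming a (batch, 768) embedding tensor `embeds`:
#   normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
#   scaled = normalizer.scale(embeds)        # (embeds - mean) / std
#   restored = normalizer.unscale(scaled)    # inverts scale() up to float error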
| 94 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A__ = {"""configuration_opt""": ["""OPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """OPTConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_opt"] = [
        "OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OPTForCausalLM",
        "OPTModel",
        "OPTPreTrainedModel",
        "OPTForSequenceClassification",
        "OPTForQuestionAnswering",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""TFOPTForCausalLM""", """TFOPTModel""", """TFOPTPreTrainedModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_opt"] = [
        "FlaxOPTForCausalLM",
        "FlaxOPTModel",
        "FlaxOPTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
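
# (Added note) the _LazyModule indirection defers the heavy torch/tf/flax imports
# until an attribute such as `OPTModel` is first accessed; the TYPE_CHECKING branch
# keeps the eager imports visible to static type checkers only.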
| 166 | 0 |
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowercase_ = mocked_dataloaders # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
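
# (Added sketch) what `find_executable_batch_size` does in isolation: it retries
# the wrapped function, halving `batch_size` each time a CUDA out-of-memory error
# is raised, until the call succeeds. The decorated probe below is illustrative:
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def probe(batch_size):
#       print(f"trying batch_size={batch_size}")
#       ...  # allocate model/activations; an OOM triggers a retry at 64, 32, ...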
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 65 |
"""simple docstring"""
from sklearn.metrics import f1_score
import datasets
lowercase_ = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
lowercase_ = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. 
Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
lowercase_ = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
def __UpperCAmelCase ( self , _a , _a , _a=None , _a=1 , _a="binary" , _a=None ):
__a = fa_score(
_a , _a , labels=_a , pos_label=_a , average=_a , sample_weight=_a )
return {"f1": float(_a ) if score.size == 1 else score}
| 65 | 1 |
'''simple docstring'''
import pickle
import numpy as np
from matplotlib import pyplot as plt
class A :
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=0.2 , SCREAMING_SNAKE_CASE=0.2 ) -> Dict:
"""simple docstring"""
A : Union[str, Any] = bp_numa
A : Any = bp_numa
A : Tuple = bp_numa
A : Optional[int] = conva_get[:2]
A : Union[str, Any] = conva_get[2]
A : Any = size_pa
A : Any = rate_w
A : Any = rate_t
A : Union[str, Any] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
A : int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
A : List[Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
A : int = -2 * np.random.rand(self.conva[1] ) + 1
A : Optional[Any] = -2 * np.random.rand(self.num_bpa ) + 1
A : Optional[Any] = -2 * np.random.rand(self.num_bpa ) + 1
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
A : Dict = {
'''num_bp1''': self.num_bpa,
'''num_bp2''': self.num_bpa,
'''num_bp3''': self.num_bpa,
'''conv1''': self.conva,
'''step_conv1''': self.step_conva,
'''size_pooling1''': self.size_poolinga,
'''rate_weight''': self.rate_weight,
'''rate_thre''': self.rate_thre,
'''w_conv1''': self.w_conva,
'''wkj''': self.wkj,
'''vji''': self.vji,
'''thre_conv1''': self.thre_conva,
'''thre_bp2''': self.thre_bpa,
'''thre_bp3''': self.thre_bpa,
}
with open(SCREAMING_SNAKE_CASE , '''wb''' ) as f:
pickle.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
print(F'Model saved: {save_path}' )
@classmethod
def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
with open(SCREAMING_SNAKE_CASE , '''rb''' ) as f:
A : Any = pickle.load(SCREAMING_SNAKE_CASE ) # noqa: S301
A : Dict = model_dic.get('''conv1''' )
conv_get.append(model_dic.get('''step_conv1''' ) )
A : Any = model_dic.get('''size_pooling1''' )
A : List[str] = model_dic.get('''num_bp1''' )
A : Optional[Any] = model_dic.get('''num_bp2''' )
A : List[Any] = model_dic.get('''num_bp3''' )
A : List[str] = model_dic.get('''rate_weight''' )
A : List[str] = model_dic.get('''rate_thre''' )
# create model instance
A : Optional[int] = CNN(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# modify model parameter
A : Dict = model_dic.get('''w_conv1''' )
A : Optional[int] = model_dic.get('''wkj''' )
A : Dict = model_dic.get('''vji''' )
A : List[str] = model_dic.get('''thre_conv1''' )
A : Optional[int] = model_dic.get('''thre_bp2''' )
A : List[str] = model_dic.get('''thre_bp3''' )
return conv_ins
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
return 1 / (1 + np.exp(-1 * x ))
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
return round(SCREAMING_SNAKE_CASE , 3 )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
A : Optional[Any] = convs[0]
A : Any = convs[1]
A : Optional[Any] = np.shape(SCREAMING_SNAKE_CASE )[0]
# get the data slice of original image data, data_focus
A : Union[str, Any] = []
for i_focus in range(0 , size_data - size_conv + 1 , SCREAMING_SNAKE_CASE ):
for j_focus in range(0 , size_data - size_conv + 1 , SCREAMING_SNAKE_CASE ):
A : Any = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(SCREAMING_SNAKE_CASE )
# calculate the feature map of every single kernel, and saved as list of matrix
A : str = []
A : Dict = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(SCREAMING_SNAKE_CASE ):
A : Tuple = []
for i_focus in range(len(SCREAMING_SNAKE_CASE ) ):
A : int = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(SCREAMING_SNAKE_CASE ) )
A : Optional[Any] = np.asmatrix(SCREAMING_SNAKE_CASE ).reshape(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
data_featuremap.append(SCREAMING_SNAKE_CASE )
        # expanding the data slice to one dimension
A : int = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(SCREAMING_SNAKE_CASE ) )
A : Optional[Any] = np.asarray(SCREAMING_SNAKE_CASE )
return focus_list, data_featuremap
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="average_pool" ) -> Dict:
"""simple docstring"""
A : List[Any] = len(featuremaps[0] )
A : Any = int(size_map / size_pooling )
A : int = []
for i_map in range(len(SCREAMING_SNAKE_CASE ) ):
A : List[Any] = featuremaps[i_map]
A : List[str] = []
for i_focus in range(0 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
for j_focus in range(0 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
A : Any = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(SCREAMING_SNAKE_CASE ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(SCREAMING_SNAKE_CASE ) )
A : List[Any] = np.asmatrix(SCREAMING_SNAKE_CASE ).reshape(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
featuremap_pooled.append(SCREAMING_SNAKE_CASE )
return featuremap_pooled
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
A : int = []
for i in range(len(SCREAMING_SNAKE_CASE ) ):
A : List[Any] = np.shape(data[i] )
A : List[Any] = data[i].reshape(1 , shapes[0] * shapes[1] )
A : Optional[Any] = data_listed.getA().tolist()[0]
data_expanded.extend(SCREAMING_SNAKE_CASE )
A : Tuple = np.asarray(SCREAMING_SNAKE_CASE )
return data_expanded
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
A : Optional[Any] = np.asarray(SCREAMING_SNAKE_CASE )
A : Any = np.shape(SCREAMING_SNAKE_CASE )
A : Optional[int] = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
A : int = []
A : str = 0
for i_map in range(SCREAMING_SNAKE_CASE ):
A : Optional[int] = np.ones((size_map, size_map) )
for i in range(0 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
for j in range(0 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
A : Tuple = pd_pool[
i_pool
]
A : str = i_pool + 1
A : Union[str, Any] = np.multiply(
SCREAMING_SNAKE_CASE , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(SCREAMING_SNAKE_CASE )
return pd_all
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=bool ) -> List[Any]:
"""simple docstring"""
print('''----------------------Start Training-------------------------''' )
print((''' - - Shape: Train_Data ''', np.shape(SCREAMING_SNAKE_CASE )) )
print((''' - - Shape: Teach_Data ''', np.shape(SCREAMING_SNAKE_CASE )) )
A : int = 0
A : Union[str, Any] = []
A : Dict = 10000
while rp < n_repeat and mse >= error_accuracy:
A : Tuple = 0
print(F'-------------Learning Time {rp}--------------' )
for p in range(len(SCREAMING_SNAKE_CASE ) ):
# print('------------Learning Image: %d--------------'%p)
A : str = np.asmatrix(datas_train[p] )
A : Any = np.asarray(datas_teach[p] )
A, A : Union[str, Any] = self.convolute(
SCREAMING_SNAKE_CASE , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
A : int = self.pooling(SCREAMING_SNAKE_CASE , self.size_poolinga )
A : int = np.shape(SCREAMING_SNAKE_CASE )
A : str = self._expand(SCREAMING_SNAKE_CASE )
A : Dict = data_bp_input
A : int = np.dot(SCREAMING_SNAKE_CASE , self.vji.T ) - self.thre_bpa
A : Any = self.sig(SCREAMING_SNAKE_CASE )
A : int = np.dot(SCREAMING_SNAKE_CASE , self.wkj.T ) - self.thre_bpa
A : List[Any] = self.sig(SCREAMING_SNAKE_CASE )
            # --------------Model Learning ----------------------
# calculate error and gradient---------------
A : List[str] = np.multiply(
(data_teach - bp_outa) , np.multiply(SCREAMING_SNAKE_CASE , (1 - bp_outa) ) )
A : Optional[int] = np.multiply(
np.dot(SCREAMING_SNAKE_CASE , self.wkj ) , np.multiply(SCREAMING_SNAKE_CASE , (1 - bp_outa) ) )
A : Optional[Any] = np.dot(SCREAMING_SNAKE_CASE , self.vji )
A : List[str] = pd_i_all / (self.size_poolinga * self.size_poolinga)
A : Optional[Any] = pd_conva_pooled.T.getA().tolist()
A : Optional[Any] = self._calculate_gradient_from_pool(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
A : Any = self._expand_mat(pd_conva_all[k_conv] )
A : List[Any] = self.rate_weight * np.dot(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A : Dict = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
A : List[str] = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
A : Dict = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
A : List[Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
A : List[str] = self.thre_bpa - pd_k_all * self.rate_thre
A : List[Any] = self.thre_bpa - pd_j_all * self.rate_thre
                # accumulate the error of each single image
A : Optional[int] = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
A : str = rp + 1
A : Union[str, Any] = error_count / patterns
all_mse.append(SCREAMING_SNAKE_CASE )
def draw_error():
A : Optional[int] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(SCREAMING_SNAKE_CASE , '''+-''' )
plt.plot(SCREAMING_SNAKE_CASE , '''r--''' )
plt.xlabel('''Learning Times''' )
plt.ylabel('''All_mse''' )
plt.grid(SCREAMING_SNAKE_CASE , alpha=0.5 )
plt.show()
        print('''------------------Training Completed---------------------''' )
print((''' - - Training epoch: ''', rp, F' - - Mse: {mse:.6f}') )
if draw_e:
draw_error()
return mse
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
A : Optional[int] = []
print('''-------------------Start Testing-------------------------''' )
print((''' - - Shape: Test_Data ''', np.shape(SCREAMING_SNAKE_CASE )) )
for p in range(len(SCREAMING_SNAKE_CASE ) ):
A : Optional[Any] = np.asmatrix(datas_test[p] )
A, A : Tuple = self.convolute(
SCREAMING_SNAKE_CASE , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
A : List[str] = self.pooling(SCREAMING_SNAKE_CASE , self.size_poolinga )
A : str = self._expand(SCREAMING_SNAKE_CASE )
A : Optional[Any] = data_bp_input
A : Union[str, Any] = bp_outa * self.vji.T - self.thre_bpa
A : Optional[Any] = self.sig(SCREAMING_SNAKE_CASE )
A : Any = bp_outa * self.wkj.T - self.thre_bpa
A : Union[str, Any] = self.sig(SCREAMING_SNAKE_CASE )
produce_out.extend(bp_outa.getA().tolist() )
A : Optional[int] = [list(map(self.do_round , SCREAMING_SNAKE_CASE ) ) for each in produce_out]
return np.asarray(SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
A : List[Any] = np.asmatrix(SCREAMING_SNAKE_CASE )
A, A : Union[str, Any] = self.convolute(
SCREAMING_SNAKE_CASE , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
A : Optional[Any] = self.pooling(SCREAMING_SNAKE_CASE , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
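# Standalone sketch (my simplification, not part of the class above) of the average
# pooling done by the pooling method: split a square feature map into
# non-overlapping windows of side `size_pooling` and average each window.
# (`np` is already imported at the top of this file.)
def average_pool(feature_map: np.ndarray, size_pooling: int) -> np.ndarray:
    size_map = feature_map.shape[0]
    size_pooled = size_map // size_pooling
    pooled = np.empty((size_pooled, size_pooled))
    for i in range(size_pooled):
        for j in range(size_pooled):
            window = feature_map[
                i * size_pooling : (i + 1) * size_pooling,
                j * size_pooling : (j + 1) * size_pooling,
            ]
            pooled[i, j] = window.mean()
    return pooled


# average_pool(np.arange(16).reshape(4, 4), 2) -> [[2.5, 4.5], [10.5, 12.5]]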
| 634 |
'''simple docstring'''
import random
def partition(a, left_index, right_index):
    '''Lomuto-style partition around the element at left_index; returns its final position.'''
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a, left, right):
    '''In-place quicksort on a[left:right] with a uniformly random pivot.'''
    if left < right:
        pivot = random.randint(left, right - 1)
        a[left], a[pivot] = (
            a[pivot],
            a[left],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point


def main():
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    quick_sort_random(unsorted, 0, len(unsorted))
    print(unsorted)


if __name__ == "__main__":
    main()
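# Quick sanity check (hypothetical usage, same API as main() above):
#   data = [3, 1, 4, 1, 5, 9, 2, 6]
#   quick_sort_random(data, 0, len(data))
#   assert data == [1, 1, 2, 3, 4, 5, 6, 9]
# The random pivot makes the expected running time O(n log n) regardless of input
# order; the in-place partition keeps extra space at the O(log n) recursion depth.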
| 634 | 1 |
"""simple docstring"""
def sum_digits(num: int) -> int:
    """Return the sum of the decimal digits of ``num``."""
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    """Digit sum of the numerator of the ``max_n``-th convergent of e (Project Euler 65)."""
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)
if __name__ == "__main__":
print(F'''{solution() = }''')
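# Worked check (my note, not in the original file): e = [2; 1, 2, 1, 1, 4, 1, 1, 6, ...]
# as a continued fraction, so the partial denominator a_i is 2*i//3 when i % 3 == 0
# and 1 otherwise. With n_i = a_i * n_(i-1) + n_(i-2), the numerators run
# 2, 3, 8, 11, 19, 87, ...; solution(10) reaches 1457, whose digit sum is 1+4+5+7 = 17.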
| 705 |
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_params(module) -> None:
    """Disable gradient computation for every parameter of ``module``."""
    for param in module.parameters():
        param.requires_grad = False


def get_device() -> str:
    """Pick the best available torch device, with a warning about MPS."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image) -> None:
    """Display an image with matplotlib, hiding both axes."""
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp() -> str:
    """Return the current wall-clock time as HH:MM:SS."""
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
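# Example usage of the helpers above (names as restored here; `model` is hypothetical):
#   device = get_device()                    # "cuda", "mps", or "cpu"
#   freeze_params(model.text_encoder)        # stop gradients through a submodule
#   print(f"[{get_timestamp()}] using {device}")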
| 494 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def _UpperCamelCase ( self ) -> str:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[int] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def _UpperCamelCase ( self ) -> Any:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_uncond_unet
SCREAMING_SNAKE_CASE : Any = ScoreSdeVeScheduler()
SCREAMING_SNAKE_CASE : List[str] = ScoreSdeVePipeline(unet=lowercase__ , scheduler=lowercase__ )
sde_ve.to(lowercase__ )
sde_ve.set_progress_bar_config(disable=lowercase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = sde_ve(num_inference_steps=2 , output_type='numpy' , generator=lowercase__ ).images
SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[int] = sde_ve(num_inference_steps=2 , output_type='numpy' , generator=lowercase__ , return_dict=lowercase__ )[
0
]
SCREAMING_SNAKE_CASE : List[Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE : List[Any] = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self ) -> int:
SCREAMING_SNAKE_CASE : Union[str, Any] = 'google/ncsnpp-church-256'
SCREAMING_SNAKE_CASE : Dict = UNetaDModel.from_pretrained(lowercase__ )
SCREAMING_SNAKE_CASE : str = ScoreSdeVeScheduler.from_pretrained(lowercase__ )
SCREAMING_SNAKE_CASE : List[str] = ScoreSdeVePipeline(unet=lowercase__ , scheduler=lowercase__ )
sde_ve.to(lowercase__ )
sde_ve.set_progress_bar_config(disable=lowercase__ )
SCREAMING_SNAKE_CASE : int = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = sde_ve(num_inference_steps=10 , output_type='numpy' , generator=lowercase__ ).images
SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
SCREAMING_SNAKE_CASE : Tuple = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
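# Minimal inference sketch mirroring the slow test above (the mangled names
# UNetaDModel / ScoreSdeVeScheduler correspond to diffusers' UNet2DModel /
# ScoreSdeVeScheduler; shown as comments since it downloads weights):
#   unet = UNet2DModel.from_pretrained("google/ncsnpp-church-256")
#   scheduler = ScoreSdeVeScheduler.from_pretrained("google/ncsnpp-church-256")
#   pipe = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
#   image = pipe(num_inference_steps=10, output_type="numpy").images[0]  # (256, 256, 3)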
| 251 | '''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase :List[str] = logging.get_logger(__name__)
_lowerCAmelCase :List[Any] = {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class UpperCAmelCase ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case__ : Dict = "camembert"
def __init__( self , lowercase__=30_522 , lowercase__=768 , lowercase__=12 , lowercase__=12 , lowercase__=3_072 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=512 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=1E-12 , lowercase__=1 , lowercase__=0 , lowercase__=2 , lowercase__="absolute" , lowercase__=True , lowercase__=None , **lowercase__ , ) -> str:
super().__init__(pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ , **lowercase__ )
SCREAMING_SNAKE_CASE : Tuple = vocab_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : str = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE : Any = intermediate_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = type_vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
SCREAMING_SNAKE_CASE : List[str] = layer_norm_eps
SCREAMING_SNAKE_CASE : Optional[Any] = position_embedding_type
SCREAMING_SNAKE_CASE : str = use_cache
SCREAMING_SNAKE_CASE : Optional[int] = classifier_dropout
class UpperCAmelCase ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def _UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : Optional[int] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
SCREAMING_SNAKE_CASE : Optional[Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
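# Usage sketch (assuming these classes are transformers' CamembertConfig and
# CamembertOnnxConfig, which the checkpoint map above suggests):
#   config = CamembertConfig()
#   onnx_config = CamembertOnnxConfig(config, task="sequence-classification")
#   print(onnx_config.inputs)
#   # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#   #              ('attention_mask', {0: 'batch', 1: 'sequence'})])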
| 251 | 1 |
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class UpperCAmelCase :
def __init__( self: Optional[int] , __UpperCamelCase: str , __UpperCamelCase: Optional[Any]=2 , __UpperCamelCase: Any=32 , __UpperCamelCase: Optional[int]=16 , __UpperCamelCase: Optional[Any]=3 , __UpperCamelCase: List[Any]=True , __UpperCamelCase: int=True , __UpperCamelCase: Union[str, Any]=32 , __UpperCamelCase: Optional[int]=4 , __UpperCamelCase: List[str]=[0, 1, 2, 3] , __UpperCamelCase: Any=4 , __UpperCamelCase: Tuple=37 , __UpperCamelCase: Optional[Any]="gelu" , __UpperCamelCase: Any=0.1 , __UpperCamelCase: str=0.1 , __UpperCamelCase: Optional[Any]=0.0_2 , __UpperCamelCase: int=3 , __UpperCamelCase: List[Any]=[1, 384, 24, 24] , __UpperCamelCase: Optional[int]=True , __UpperCamelCase: Union[str, Any]=None , ):
_a = parent
_a = batch_size
_a = image_size
_a = patch_size
_a = num_channels
_a = is_training
_a = use_labels
_a = hidden_size
_a = num_hidden_layers
_a = backbone_out_indices
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = initializer_range
_a = num_labels
_a = backbone_featmap_shape
_a = scope
_a = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
_a = (image_size // patch_size) ** 2
_a = num_patches + 1
def _A ( self: Any ):
_a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a = None
if self.use_labels:
_a = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_a = self.get_config()
return config, pixel_values, labels
def _A ( self: Optional[int] ):
_a = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [96, 192, 384, 768],
'''num_groups''': 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__UpperCamelCase , backbone_featmap_shape=self.backbone_featmap_shape , )
def _A ( self: List[Any] , __UpperCamelCase: str , __UpperCamelCase: Any , __UpperCamelCase: List[Any] ):
_a = DPTModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_a = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _A ( self: List[Any] , __UpperCamelCase: Dict , __UpperCamelCase: Union[str, Any] , __UpperCamelCase: Optional[Any] ):
_a = self.num_labels
_a = DPTForDepthEstimation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_a = model(__UpperCamelCase )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def _A ( self: Optional[int] , __UpperCamelCase: Dict , __UpperCamelCase: List[Any] , __UpperCamelCase: Dict ):
_a = self.num_labels
_a = DPTForSemanticSegmentation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_a = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def _A ( self: List[Any] ):
_a = self.prepare_config_and_inputs()
_a , _a , _a = config_and_inputs
_a = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( __snake_case , __snake_case , unittest.TestCase ):
a: Dict = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
a: Any = (
{
"depth-estimation": DPTForDepthEstimation,
"feature-extraction": DPTModel,
"image-segmentation": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
a: Dict = False
a: Union[str, Any] = False
a: Optional[int] = False
def _A ( self: int ):
_a = DPTModelTester(self )
_a = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def _A ( self: Dict ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''DPT does not use inputs_embeds''' )
def _A ( self: List[Any] ):
pass
def _A ( self: Union[str, Any] ):
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_a = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def _A ( self: Any ):
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(__UpperCamelCase )
_a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a = [*signature.parameters.keys()]
_a = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def _A ( self: Optional[int] ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def _A ( self: str ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__UpperCamelCase )
def _A ( self: Union[str, Any] ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCamelCase )
def _A ( self: Optional[int] ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = True
if model_class in get_values(__UpperCamelCase ):
continue
_a = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.train()
_a = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
_a = model(**__UpperCamelCase ).loss
loss.backward()
def _A ( self: Optional[int] ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = False
_a = True
if model_class in get_values(__UpperCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
_a = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.gradient_checkpointing_enable()
model.train()
_a = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
_a = model(**__UpperCamelCase ).loss
loss.backward()
def _A ( self: str ):
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = _config_zero_init(__UpperCamelCase )
for model_class in self.all_model_classes:
_a = model_class(config=__UpperCamelCase )
# Skip the check for the backbone
_a = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
_a = [f"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _A ( self: int ):
pass
@slow
def _A ( self: Dict ):
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
_a = DPTModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def _A ( self: Tuple ):
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = '''add'''
with self.assertRaises(__UpperCamelCase ):
_a = DPTForDepthEstimation(__UpperCamelCase )
def __snake_case ( ) -> Optional[int]:
_a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
@slow
class UpperCAmelCase ( unittest.TestCase ):
def _A ( self: List[Any] ):
_a = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''' )
_a = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''' ).to(__UpperCamelCase )
_a = prepare_img()
_a = image_processor(images=__UpperCamelCase , return_tensors='''pt''' ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
_a = model(**__UpperCamelCase )
_a = outputs.predicted_depth
# verify the predicted depth
_a = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , __UpperCamelCase )
_a = torch.tensor(
[[[5.6_4_3_7, 5.6_1_4_6, 5.6_5_1_1], [5.4_3_7_1, 5.5_6_4_9, 5.5_9_5_8], [5.5_2_1_5, 5.5_1_8_4, 5.5_2_9_3]]] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , __UpperCamelCase , atol=1E-4 ) )
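# Inference sketch distilled from the integration test above (documented DPT API;
# comments only, since it downloads the checkpoint):
#   processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
#   model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas")
#   inputs = processor(images=image, return_tensors="pt")
#   with torch.no_grad():
#       depth = model(**inputs).predicted_depth  # shape (1, 384, 384)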
| 719 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def __snake_case ( _UpperCamelCase=None ) -> List[str]:
_a = argparse.ArgumentParser(add_help=_UpperCamelCase , allow_abbrev=_UpperCamelCase )
# The main config parser
_a = config_command_parser(_UpperCamelCase )
# The subparser to add commands to
_a = config_parser.add_subparsers(title='''subcommands''' , dest='''subcommand''' )
# Then add other parsers with the parent parser
default_command_parser(_UpperCamelCase , parents=[parent_parser] )
update_command_parser(_UpperCamelCase , parents=[parent_parser] )
return config_parser
def __snake_case ( ) -> Optional[Any]:
_a = get_config_parser()
_a = config_parser.parse_args()
if not hasattr(_UpperCamelCase , '''func''' ):
config_parser.print_help()
exit(1 )
# Run
args.func(_UpperCamelCase )
if __name__ == "__main__":
main()
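# Self-contained sketch of the subparser wiring used above (illustrative command
# names, not accelerate's real subcommands):
import argparse


def build_parser() -> argparse.ArgumentParser:
    parser = argparse.ArgumentParser(prog="tool")
    subparsers = parser.add_subparsers(title="subcommands", dest="subcommand")
    run = subparsers.add_parser("run")
    run.set_defaults(func=lambda a: print("running"))  # each subcommand registers its handler
    return parser


if __name__ == "__main__":
    demo_args = build_parser().parse_args(["run"])
    demo_args.func(demo_args)  # prints "running"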
| 346 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowerCAmelCase , lowerCAmelCase=7 , lowerCAmelCase=3 , lowerCAmelCase=18 , lowerCAmelCase=30 , lowerCAmelCase=400 , lowerCAmelCase=True , lowerCAmelCase=None , lowerCAmelCase=True , ):
UpperCAmelCase_ = size if size is not None else {'''height''': 18, '''width''': 18}
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = image_size
UpperCAmelCase_ = min_resolution
UpperCAmelCase_ = max_resolution
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
UpperCAmelCase_ = apply_ocr
def A__ ( self ):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class lowerCamelCase ( lowercase__, unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : str = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def A__ ( self ):
UpperCAmelCase_ = LayoutLMvaImageProcessingTester(self )
@property
def A__ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def A__ ( self ):
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase , "do_resize" ) )
self.assertTrue(hasattr(lowerCAmelCase , "size" ) )
self.assertTrue(hasattr(lowerCAmelCase , "apply_ocr" ) )
def A__ ( self ):
UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def A__ ( self ):
pass
def A__ ( self ):
# Initialize image_processing
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , Image.Image )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
self.assertIsInstance(encoding.words , lowerCAmelCase )
self.assertIsInstance(encoding.boxes , lowerCAmelCase )
# Test batched
UpperCAmelCase_ = image_processing(lowerCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def A__ ( self ):
# Initialize image_processing
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , numpify=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , np.ndarray )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
UpperCAmelCase_ = image_processing(lowerCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def A__ ( self ):
# Initialize image_processing
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , torchify=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , torch.Tensor )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
UpperCAmelCase_ = image_processing(lowerCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def A__ ( self ):
# with apply_OCR = True
UpperCAmelCase_ = LayoutLMvaImageProcessor()
from datasets import load_dataset
UpperCAmelCase_ = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" )
UpperCAmelCase_ = Image.open(ds[0]["file"] ).convert("RGB" )
UpperCAmelCase_ = image_processing(lowerCAmelCase , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
UpperCAmelCase_ = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
UpperCAmelCase_ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , lowerCAmelCase )
self.assertListEqual(encoding.boxes , lowerCAmelCase )
# with apply_OCR = False
UpperCAmelCase_ = LayoutLMvaImageProcessor(apply_ocr=lowerCAmelCase )
UpperCAmelCase_ = image_processing(lowerCAmelCase , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
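# Usage sketch mirroring the OCR test above (the mangled LayoutLMvaImageProcessor
# is transformers' LayoutLMv3ImageProcessor; running OCR needs pytesseract/Tesseract):
#   processor = LayoutLMv3ImageProcessor()              # apply_ocr=True by default
#   encoding = processor(image, return_tensors="pt")    # pixel_values + OCR words/boxes
#   processor_no_ocr = LayoutLMv3ImageProcessor(apply_ocr=False)  # pixel_values only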
| 579 |
"""simple docstring"""
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}


def decimal_to_any(num: int, base: int) -> str:
    """Convert a non-negative decimal integer to its representation in ``base`` (2..36)."""
    if isinstance(num, float):
        raise TypeError('''int() can\'t convert non-string with explicit base''')
    if num < 0:
        raise ValueError('''parameter must be positive int''')
    if isinstance(base, str):
        raise TypeError('''\'str\' object cannot be interpreted as an integer''')
    if isinstance(base, float):
        raise TypeError('''\'float\' object cannot be interpreted as an integer''')
    if base in (0, 1):
        raise ValueError('''base must be >= 2''')
    if base > 36:
        raise ValueError('''base must be <= 36''')
    new_value = ''''''
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(num)
            return str(new_value[::-1])
    return new_value[::-1]
return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1_000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
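# Compact alternative sketch (my own) built on divmod and a digit lookup table;
# decimal_to_any above is the long-form, input-validating version of the same idea.
import string

DIGIT_CHARS = string.digits + string.ascii_uppercase


def to_base(num: int, base: int) -> str:
    if num == 0:
        return "0"
    out = ""
    while num > 0:
        num, mod = divmod(num, base)
        out = DIGIT_CHARS[mod] + out
    return out


assert to_base(255, 16) == "FF" and int(to_base(255, 16), 16) == 255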
| 506 | 0 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class _UpperCAmelCase :
def __init__( self , a__ , a__=13 , a__=10 , a__=3 , a__=2 , a__=2 , a__=2 , a__=True , a__=True , a__=32 , a__=5 , a__=4 , a__=37 , a__="gelu" , a__=0.1 , a__=0.1 , a__=10 , a__=0.02 , a__=0.9 , a__=None , ):
A_ : Tuple = parent
A_ : Union[str, Any] = batch_size
A_ : str = image_size
A_ : Union[str, Any] = num_channels
A_ : List[str] = patch_size
A_ : Optional[Any] = tubelet_size
A_ : List[Any] = num_frames
A_ : str = is_training
A_ : List[Any] = use_labels
A_ : List[str] = hidden_size
A_ : Optional[Any] = num_hidden_layers
A_ : str = num_attention_heads
A_ : Union[str, Any] = intermediate_size
A_ : Dict = hidden_act
A_ : int = hidden_dropout_prob
A_ : Union[str, Any] = attention_probs_dropout_prob
A_ : Union[str, Any] = type_sequence_label_size
A_ : int = initializer_range
A_ : Dict = mask_ratio
A_ : Optional[Any] = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
A_ : int = (image_size // patch_size) ** 2
A_ : Dict = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
A_ : Dict = int(mask_ratio * self.seq_length )
def _lowerCamelCase ( self ):
A_ : Any = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
A_ : Optional[int] = None
if self.use_labels:
A_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : List[Any] = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self ):
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a__ , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self , a__ , a__ , a__ ):
A_ : Dict = VideoMAEModel(config=a__ )
model.to(a__ )
model.eval()
A_ : Dict = model(a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self , a__ , a__ , a__ ):
A_ : Optional[Any] = VideoMAEForPreTraining(a__ )
model.to(a__ )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
A_ : List[Any] = torch.ones((self.num_masks,) )
A_ : Dict = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
A_ : int = mask.expand(self.batch_size , -1 ).bool()
A_ : List[Any] = model(a__ , a__ )
# model only returns predictions for masked patches
A_ : Union[str, Any] = mask.sum().item()
A_ : Tuple = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def _lowerCamelCase ( self ):
A_ : Union[str, Any] = self.prepare_config_and_inputs()
A_ : Optional[Any] = config_and_inputs
A_ : str = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
a = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
a = (
{'''feature-extraction''': VideoMAEModel, '''video-classification''': VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
a = False
a = False
a = False
a = False
def _lowerCamelCase ( self ):
A_ : int = VideoMAEModelTester(self )
A_ : Any = ConfigTester(self , config_class=a__ , has_text_modality=a__ , hidden_size=37 )
def _lowerCamelCase ( self , a__ , a__ , a__=False ):
A_ : Optional[Any] = copy.deepcopy(a__ )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
A_ : List[Any] = torch.ones((self.model_tester.num_masks,) )
A_ : List[str] = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
A_ : Union[str, Any] = mask.expand(self.model_tester.batch_size , -1 ).bool()
A_ : int = bool_masked_pos.to(a__ )
if return_labels:
if model_class in [
*get_values(a__ ),
]:
A_ : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a__ )
return inputs_dict
def _lowerCamelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""VideoMAE does not use inputs_embeds""" )
def _lowerCamelCase ( self ):
pass
def _lowerCamelCase ( self ):
A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Tuple = model_class(a__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A_ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a__ , nn.Linear ) )
def _lowerCamelCase ( self ):
A_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : List[str] = model_class(a__ )
A_ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : Dict = [*signature.parameters.keys()]
A_ : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , a__ )
def _lowerCamelCase ( self ):
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def _lowerCamelCase ( self ):
A_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*a__ )
@slow
def _lowerCamelCase ( self ):
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Any = VideoMAEModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def _lowerCamelCase ( self ):
if not self.has_attentions:
pass
else:
A_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Optional[int] = True
for model_class in self.all_model_classes:
A_ : str = self.model_tester.seq_length - self.model_tester.num_masks
A_ : List[Any] = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
A_ : Optional[int] = True
A_ : Optional[Any] = False
A_ : List[Any] = True
A_ : List[str] = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
A_ : List[Any] = model(**self._prepare_for_class(a__ , a__ ) )
A_ : Dict = outputs.attentions
self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A_ : List[Any] = True
A_ : List[str] = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
A_ : int = model(**self._prepare_for_class(a__ , a__ ) )
A_ : Any = outputs.attentions
self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
A_ : Union[str, Any] = len(a__ )
# Check attention is always last and order is fine
A_ : Optional[Any] = True
A_ : int = True
A_ : List[str] = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
A_ : Tuple = model(**self._prepare_for_class(a__ , a__ ) )
self.assertEqual(out_len + 1 , len(a__ ) )
A_ : Optional[Any] = outputs.attentions
self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _lowerCamelCase ( self ):
def check_hidden_states_output(a__ , a__ , a__ ):
A_ : str = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
A_ : Union[str, Any] = model(**self._prepare_for_class(a__ , a__ ) )
A_ : Optional[Any] = outputs.hidden_states
A_ : Union[str, Any] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(a__ ) , a__ )
A_ : str = self.model_tester.seq_length - self.model_tester.num_masks
A_ : str = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
A_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Union[str, Any] = True
check_hidden_states_output(a__ , a__ , a__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A_ : Tuple = True
check_hidden_states_output(a__ , a__ , a__ )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _lowerCamelCase ( self ):
pass
def _lowerCAmelCase ( ):
'''simple docstring'''
A_ : int = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" ,filename="""eating_spaghetti.npy""" ,repo_type="""dataset""" )
A_ : Optional[int] = np.load(_lowerCAmelCase )
return list(_lowerCAmelCase )
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def _lowerCamelCase ( self ):
A_ : List[str] = VideoMAEForVideoClassification.from_pretrained("""MCG-NJU/videomae-base-finetuned-kinetics""" ).to(
a__ )
A_ : Any = self.default_image_processor
A_ : Optional[Any] = prepare_video()
A_ : Union[str, Any] = image_processor(a__ , return_tensors="""pt""" ).to(a__ )
# forward pass
with torch.no_grad():
A_ : Any = model(**a__ )
# verify the logits
A_ : List[str] = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , a__ )
A_ : Optional[Any] = torch.tensor([0.3669, -0.0688, -0.2421] ).to(a__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a__ , atol=1E-4 ) )
@slow
def _lowerCamelCase ( self ):
A_ : int = VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""" ).to(a__ )
A_ : str = self.default_image_processor
A_ : Any = prepare_video()
A_ : Union[str, Any] = image_processor(a__ , return_tensors="""pt""" ).to(a__ )
# add boolean mask, indicating which patches to mask
A_ : Optional[Any] = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" )
A_ : Dict = torch.load(a__ )
# forward pass
with torch.no_grad():
A_ : Optional[int] = model(**a__ )
# verify the logits
A_ : int = torch.Size([1, 1408, 1536] )
A_ : Union[str, Any] = torch.tensor(
[[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] , device=a__ )
self.assertEqual(outputs.logits.shape , a__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , a__ , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
A_ : Optional[Any] = torch.tensor([0.5142] , device=a__ )
self.assertTrue(torch.allclose(outputs.loss , a__ , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
A_ : Optional[int] = VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""" , norm_pix_loss=a__ ).to(
a__ )
with torch.no_grad():
A_ : Optional[Any] = model(**a__ )
A_ : List[Any] = torch.tensor([0.6469] , device=a__ )
self.assertTrue(torch.allclose(outputs.loss , a__ , atol=1E-4 ) )
| 706 |
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
_lowerCAmelCase = logging.get_logger(__name__)
# General docstring
_lowerCAmelCase = """PoolFormerConfig"""
# Base docstring
_lowerCAmelCase = """sail/poolformer_s12"""
_lowerCAmelCase = [1, 512, 7, 7]
# Image classification docstring
_lowerCAmelCase = """sail/poolformer_s12"""
_lowerCAmelCase = """tabby, tabby cat"""
_lowerCAmelCase = [
"""sail/poolformer_s12""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
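# Stochastic depth ("drop path"): during training, entire residual branches are randomly
# zeroed per sample and the survivors are rescaled by 1 / keep_prob so the expected value
# of the output is unchanged.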
def _lowerCAmelCase ( _lowerCAmelCase ,_lowerCAmelCase = 0.0 ,_lowerCAmelCase = False ):
'''simple docstring'''
if drop_prob == 0.0 or not training:
return input
A_ : int = 1 - drop_prob
A_ : List[str] = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
A_ : Optional[int] = keep_prob + torch.rand(_lowerCAmelCase ,dtype=input.dtype ,device=input.device )
random_tensor.floor_() # binarize
A_ : Optional[int] = input.div(_lowerCAmelCase ) * random_tensor
return output
class _UpperCAmelCase ( nn.Module ):
def __init__( self , a__ = None ):
super().__init__()
A_ : int = drop_prob
def _lowerCamelCase ( self , a__ ):
return drop_path(a__ , self.drop_prob , self.training )
def _lowerCamelCase ( self ):
return "p={}".format(self.drop_prob )
class _UpperCAmelCase ( nn.Module ):
def __init__( self , a__ , a__ , a__ , a__ , a__ , a__=None ):
super().__init__()
A_ : Dict = patch_size if isinstance(a__ , collections.abc.Iterable ) else (patch_size, patch_size)
A_ : Optional[Any] = stride if isinstance(a__ , collections.abc.Iterable ) else (stride, stride)
A_ : int = padding if isinstance(a__ , collections.abc.Iterable ) else (padding, padding)
A_ : Dict = nn.Convad(a__ , a__ , kernel_size=a__ , stride=a__ , padding=a__ )
A_ : Optional[Any] = norm_layer(a__ ) if norm_layer else nn.Identity()
def _lowerCamelCase ( self , a__ ):
A_ : Dict = self.projection(a__ )
A_ : List[Any] = self.norm(a__ )
return embeddings
class _UpperCAmelCase ( nn.GroupNorm ):
def __init__( self , a__ , **a__ ):
super().__init__(1 , a__ , **a__ )
class _UpperCAmelCase ( nn.Module ):
def __init__( self , a__ ):
super().__init__()
A_ : Optional[int] = nn.AvgPoolad(a__ , stride=1 , padding=pool_size // 2 , count_include_pad=a__ )
def _lowerCamelCase ( self , a__ ):
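# PoolFormer replaces self-attention with simple average pooling as the token mixer; the
# input is subtracted here because the surrounding block adds it back via the residual path.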
return self.pool(a__ ) - hidden_states
class _UpperCAmelCase ( nn.Module ):
def __init__( self , a__ , a__ , a__ , a__ ):
super().__init__()
A_ : str = nn.Convad(a__ , a__ , 1 )
A_ : List[str] = nn.Convad(a__ , a__ , 1 )
A_ : Optional[Any] = PoolFormerDropPath(a__ )
if isinstance(config.hidden_act , a__ ):
A_ : int = ACTaFN[config.hidden_act]
else:
A_ : Optional[int] = config.hidden_act
def _lowerCamelCase ( self , a__ ):
A_ : Tuple = self.conva(a__ )
A_ : str = self.act_fn(a__ )
A_ : Union[str, Any] = self.drop(a__ )
A_ : int = self.conva(a__ )
A_ : Any = self.drop(a__ )
return hidden_states
class _UpperCAmelCase ( nn.Module ):
def __init__( self , a__ , a__ , a__ , a__ , a__ , a__ ):
super().__init__()
A_ : Dict = PoolFormerPooling(a__ )
A_ : Dict = PoolFormerOutput(a__ , a__ , a__ , a__ )
A_ : Dict = PoolFormerGroupNorm(a__ )
A_ : Dict = PoolFormerGroupNorm(a__ )
# Stochastic depth: randomly drops the whole residual branch during training
A_ : Union[str, Any] = PoolFormerDropPath(a__ ) if drop_path > 0.0 else nn.Identity()
A_ : Union[str, Any] = config.use_layer_scale
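# LayerScale (as in CaiT): a learnable per-channel scale on each residual branch,
# initialised to a small value so very deep models start close to the identity mapping.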
if config.use_layer_scale:
A_ : Tuple = nn.Parameter(
config.layer_scale_init_value * torch.ones((a__) ) , requires_grad=a__ )
A_ : Tuple = nn.Parameter(
config.layer_scale_init_value * torch.ones((a__) ) , requires_grad=a__ )
def _lowerCamelCase ( self , a__ ):
if self.use_layer_scale:
A_ : List[str] = self.pooling(self.before_norm(a__ ) )
A_ : Optional[Any] = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
A_ : List[Any] = hidden_states + self.drop_path(a__ )
A_ : Tuple = ()
A_ : List[Any] = self.output(self.after_norm(a__ ) )
A_ : str = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
A_ : Optional[Any] = hidden_states + self.drop_path(a__ )
A_ : int = (output,) + outputs
return outputs
else:
A_ : Union[str, Any] = self.drop_path(self.pooling(self.before_norm(a__ ) ) )
# First residual connection
A_ : Any = pooling_output + hidden_states
A_ : Tuple = ()
# Second residual connection inside the PoolFormerOutput block
A_ : Dict = self.drop_path(self.output(self.after_norm(a__ ) ) )
A_ : int = hidden_states + layer_output
A_ : str = (output,) + outputs
return outputs
class _UpperCAmelCase ( nn.Module ):
def __init__( self , a__ ):
super().__init__()
A_ : Union[str, Any] = config
# stochastic depth decay rule
A_ : Union[str, Any] = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
A_ : Union[str, Any] = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
A_ : Optional[int] = nn.ModuleList(a__ )
# Transformer blocks
A_ : Union[str, Any] = []
A_ : Union[str, Any] = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
A_ : Dict = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
a__ , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(a__ ) )
A_ : str = nn.ModuleList(a__ )
def _lowerCamelCase ( self , a__ , a__=False , a__=True ):
A_ : Optional[int] = () if output_hidden_states else None
A_ : str = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
A_ , A_ : Union[str, Any] = layers
# Get patch embeddings from hidden_states
A_ : Tuple = embedding_layer(a__ )
# Send the embeddings through the blocks
for _, blk in enumerate(a__ ):
A_ : List[str] = blk(a__ )
A_ : Union[str, Any] = layer_outputs[0]
if output_hidden_states:
A_ : Dict = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=a__ , hidden_states=a__ )
class _UpperCAmelCase ( _lowerCamelCase ):
a = PoolFormerConfig
a = '''poolformer'''
a = '''pixel_values'''
a = True
def _lowerCamelCase ( self , a__ ):
if isinstance(a__ , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(a__ , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def _lowerCamelCase ( self , a__ , a__=False ):
if isinstance(a__ , a__ ):
A_ : List[Any] = value
_lowerCAmelCase = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
_lowerCAmelCase = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`PoolFormerImageProcessor.__call__`] for details.
"""
@add_start_docstrings(
'''The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.''' , _lowerCamelCase , )
class _UpperCAmelCase ( _lowerCamelCase ):
def __init__( self , a__ ):
super().__init__(a__ )
A_ : List[Any] = config
A_ : Optional[int] = PoolFormerEncoder(a__ )
# Initialize weights and apply final processing
self.post_init()
def _lowerCamelCase ( self ):
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(a__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=a__ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _lowerCamelCase ( self , a__ = None , a__ = None , a__ = None , ):
A_ : Union[str, Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A_ : str = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
A_ : Optional[Any] = self.encoder(
a__ , output_hidden_states=a__ , return_dict=a__ , )
A_ : Optional[int] = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=a__ , hidden_states=encoder_outputs.hidden_states , )
class _UpperCAmelCase ( nn.Module ):
def __init__( self , a__ ):
super().__init__()
A_ : int = nn.Linear(config.hidden_size , config.hidden_size )
def _lowerCamelCase ( self , a__ ):
A_ : Any = self.dense(a__ )
return output
@add_start_docstrings(
'''
PoolFormer Model transformer with an image classification head on top
''' , _lowerCamelCase , )
class _UpperCAmelCase ( _lowerCamelCase ):
def __init__( self , a__ ):
super().__init__(a__ )
A_ : str = config.num_labels
A_ : Union[str, Any] = PoolFormerModel(a__ )
# Final norm
A_ : str = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
A_ : Optional[int] = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(a__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=a__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _lowerCamelCase ( self , a__ = None , a__ = None , a__ = None , a__ = None , ):
A_ : List[Any] = return_dict if return_dict is not None else self.config.use_return_dict
A_ : Optional[int] = self.poolformer(
a__ , output_hidden_states=a__ , return_dict=a__ , )
A_ : List[str] = outputs[0]
A_ : int = self.classifier(self.norm(a__ ).mean([-2, -1] ) )
A_ : Dict = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
A_ : Tuple = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
A_ : Tuple = """single_label_classification"""
else:
A_ : Tuple = """multi_label_classification"""
if self.config.problem_type == "regression":
A_ : int = MSELoss()
if self.num_labels == 1:
A_ : List[str] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
A_ : Tuple = loss_fct(a__ , a__ )
elif self.config.problem_type == "single_label_classification":
A_ : str = CrossEntropyLoss()
A_ : int = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
A_ : List[str] = BCEWithLogitsLoss()
A_ : Optional[Any] = loss_fct(a__ , a__ )
if not return_dict:
A_ : Optional[Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=a__ , logits=a__ , hidden_states=outputs.hidden_states )
| 481 | 0 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __snake_case :
lowerCAmelCase__ = 42
lowerCAmelCase__ = None
# Automatically constructed
lowerCAmelCase__ = "dict"
lowerCAmelCase__ = None
lowerCAmelCase__ = field(default="Translation" , init=_a , repr=_a )
def __call__( self : Tuple ) -> Optional[int]:
'''simple docstring'''
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
'''simple docstring'''
from .features import Value
return {k: Value("""string""" ) for k in sorted(self.languages )}
@dataclass
class __snake_case :
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
# Automatically constructed
lowerCAmelCase__ = "dict"
lowerCAmelCase__ = None
lowerCAmelCase__ = field(default="TranslationVariableLanguages" , init=_a , repr=_a )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = sorted(set(self.languages ) ) if self.languages else None
_lowerCAmelCase : Optional[int] = len(self.languages ) if self.languages else None
def __call__( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
return pa.struct({"""language""": pa.list_(pa.string() ), """translation""": pa.list_(pa.string() )} )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCAmelCase : List[str] ) -> List[Any]:
'''simple docstring'''
_lowerCAmelCase : Optional[int] = set(self.languages )
if self.languages and set(_UpperCAmelCase ) - lang_set:
raise ValueError(
f"Some languages in example ({', '.join(sorted(set(_UpperCAmelCase ) - lang_set ) )}) are not in valid set ({', '.join(_UpperCAmelCase )})." )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
_lowerCAmelCase : Dict = []
for lang, text in translation_dict.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
_lowerCAmelCase , _lowerCAmelCase : int = zip(*sorted(_UpperCAmelCase ) )
return {"language": languages, "translation": translations}
def SCREAMING_SNAKE_CASE ( self : str ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
'''simple docstring'''
from .features import Sequence, Value
return {
"language": Sequence(Value("""string""" ) ),
"translation": Sequence(Value("""string""" ) ),
}
| 429 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
_lowerCamelCase : Optional[Any] = "true"
def _UpperCAmelCase (UpperCamelCase_ : Any , UpperCamelCase_ : List[Any]=82 , UpperCamelCase_ : List[str]=16 ):
'''simple docstring'''
set_seed(42 )
_lowerCAmelCase : Optional[Any] = RegressionModel()
_lowerCAmelCase : List[Any] = deepcopy(UpperCamelCase_ )
_lowerCAmelCase : Tuple = RegressionDataset(length=UpperCamelCase_ )
_lowerCAmelCase : int = DataLoader(UpperCamelCase_ , batch_size=UpperCamelCase_ )
model.to(accelerator.device )
_lowerCAmelCase , _lowerCAmelCase : Dict = accelerator.prepare(UpperCamelCase_ , UpperCamelCase_ )
return model, ddp_model, dataloader
def _UpperCAmelCase (UpperCamelCase_ : Accelerator , UpperCamelCase_ : int=False ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/mrpc-bert-base-cased""" )
_lowerCAmelCase : str = load_dataset("""glue""" , """mrpc""" , split="""validation""" )
def tokenize_function(UpperCamelCase_ : Optional[int] ):
_lowerCAmelCase : str = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ )
return outputs
with accelerator.main_process_first():
_lowerCAmelCase : str = dataset.map(
UpperCamelCase_ , batched=UpperCamelCase_ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
_lowerCAmelCase : int = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(UpperCamelCase_ : Optional[int] ):
if use_longest:
return tokenizer.pad(UpperCamelCase_ , padding="""longest""" , return_tensors="""pt""" )
return tokenizer.pad(UpperCamelCase_ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return DataLoader(UpperCamelCase_ , shuffle=UpperCamelCase_ , collate_fn=UpperCamelCase_ , batch_size=16 )
def _UpperCAmelCase (UpperCamelCase_ : int , UpperCamelCase_ : int ):
'''simple docstring'''
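# `dispatch_batches=True` iterates the dataloader on the main process and scatters slices
# to the workers; `split_batches=True` divides each yielded batch across processes instead
# of treating the batch size as per-process.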
_lowerCAmelCase : Optional[int] = Accelerator(dispatch_batches=UpperCamelCase_ , split_batches=UpperCamelCase_ )
_lowerCAmelCase : Any = get_dataloader(UpperCamelCase_ , not dispatch_batches )
_lowerCAmelCase : Optional[int] = AutoModelForSequenceClassification.from_pretrained(
"""hf-internal-testing/mrpc-bert-base-cased""" , return_dict=UpperCamelCase_ )
_lowerCAmelCase , _lowerCAmelCase : List[str] = accelerator.prepare(UpperCamelCase_ , UpperCamelCase_ )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def _UpperCAmelCase (UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[int] ):
'''simple docstring'''
_lowerCAmelCase : str = []
for batch in dataloader:
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = batch.values()
with torch.no_grad():
_lowerCAmelCase : Optional[int] = model(UpperCamelCase_ )
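# gather_for_metrics gathers predictions/targets from all processes and drops the samples
# that were duplicated to pad the final batch, so metrics see each example exactly once.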
_lowerCAmelCase , _lowerCAmelCase : int = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
_lowerCAmelCase , _lowerCAmelCase : List[Any] = [], []
for logit, targ in logits_and_targets:
logits.append(UpperCamelCase_ )
targs.append(UpperCamelCase_ )
_lowerCAmelCase , _lowerCAmelCase : Dict = torch.cat(UpperCamelCase_ ), torch.cat(UpperCamelCase_ )
return logits, targs
def _UpperCAmelCase (UpperCamelCase_ : Accelerator , UpperCamelCase_ : Any=82 , UpperCamelCase_ : str=False , UpperCamelCase_ : Tuple=False , UpperCamelCase_ : Dict=16 ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Any = get_basic_setup(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
_lowerCAmelCase , _lowerCAmelCase : List[str] = generate_predictions(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
assert (
len(UpperCamelCase_ ) == num_samples
), F"Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(UpperCamelCase_ )}"
def _UpperCAmelCase (UpperCamelCase_ : bool = False , UpperCamelCase_ : bool = False ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = evaluate.load("""glue""" , """mrpc""" )
_lowerCAmelCase , _lowerCAmelCase : Tuple = get_mrpc_setup(UpperCamelCase_ , UpperCamelCase_ )
# First do baseline
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[str] = setup["""no"""]
model.to(UpperCamelCase_ )
model.eval()
for batch in dataloader:
batch.to(UpperCamelCase_ )
with torch.inference_mode():
_lowerCAmelCase : Tuple = model(**UpperCamelCase_ )
_lowerCAmelCase : List[str] = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=UpperCamelCase_ , references=batch["""labels"""] )
_lowerCAmelCase : int = metric.compute()
# Then do distributed
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : int = setup["""ddp"""]
model.eval()
for batch in dataloader:
with torch.inference_mode():
_lowerCAmelCase : Dict = model(**UpperCamelCase_ )
_lowerCAmelCase : List[str] = outputs.logits.argmax(dim=-1 )
_lowerCAmelCase : Optional[int] = batch["""labels"""]
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=UpperCamelCase_ , references=UpperCamelCase_ )
_lowerCAmelCase : Optional[Any] = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def _UpperCAmelCase ():
'''simple docstring'''
_lowerCAmelCase : Dict = Accelerator(split_batches=UpperCamelCase_ , dispatch_batches=UpperCamelCase_ )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower, so they should only be run on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print("""**Testing gather_for_metrics**""" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`" )
test_mrpc(UpperCamelCase_ , UpperCamelCase_ )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("""**Test torch metrics**""" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
_lowerCAmelCase : List[str] = Accelerator(split_batches=UpperCamelCase_ , dispatch_batches=UpperCamelCase_ )
if accelerator.is_local_main_process:
print(F"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99" )
test_torch_metrics(UpperCamelCase_ , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("""**Test last batch is not dropped when perfectly divisible**""" )
_lowerCAmelCase : Optional[int] = Accelerator()
test_torch_metrics(UpperCamelCase_ , 512 )
accelerator.state._reset_state()
def _UpperCAmelCase (UpperCamelCase_ : List[Any] ):
'''simple docstring'''
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 429 | 1 |
import sys
__SCREAMING_SNAKE_CASE : int = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
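# Project Euler problem 8: slide a window of 13 adjacent digits over the 1000-digit number
# and return the largest product of any such window.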
def snake_case (__lowercase = N ) -> int:
'''simple docstring'''
_snake_case : Union[str, Any] = -sys.maxsize - 1
for i in range(len(__lowercase ) - 12 ):
_snake_case : List[str] = 1
for j in range(13 ):
product *= int(n[i + j] )
if product > largest_product:
_snake_case : Optional[int] = product
return largest_product
if __name__ == "__main__":
print(F'''{solution() = }''')
| 580 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
__SCREAMING_SNAKE_CASE : str = random.Random()
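# Generate a nested list of shape `shape` filled with scaled random floats, used as dummy
# audio inputs for the feature extractor tests.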
def snake_case (__lowercase , __lowercase=1.0 , __lowercase=None , __lowercase=None ) -> Union[str, Any]:
'''simple docstring'''
if rng is None:
_snake_case : Any = global_rng
_snake_case : Optional[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class lowercase_ ( unittest.TestCase ):
def __init__( self , lowercase_ , lowercase_=7 , lowercase_=400 , lowercase_=2_000 , lowercase_=10 , lowercase_=160 , lowercase_=8 , lowercase_=0.0 , lowercase_=4_000 , lowercase_=False , lowercase_=True , ):
_snake_case : List[Any] = parent
_snake_case : Optional[Any] = batch_size
_snake_case : Optional[Any] = min_seq_length
_snake_case : int = max_seq_length
_snake_case : str = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_snake_case : Optional[Any] = padding_value
_snake_case : List[str] = sampling_rate
_snake_case : Any = return_attention_mask
_snake_case : str = do_normalize
_snake_case : Any = feature_size
_snake_case : Optional[int] = chunk_length
_snake_case : Optional[Any] = hop_length
def UpperCamelCase ( self ):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def UpperCamelCase ( self , lowercase_=False , lowercase_=False ):
def _flatten(lowercase_ ):
return list(itertools.chain(*lowercase_ ) )
if equal_length:
_snake_case : Tuple = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_snake_case : List[str] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_snake_case : Optional[Any] = [np.asarray(lowercase_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowercase_ ( __snake_case , unittest.TestCase ):
_lowerCamelCase = WhisperFeatureExtractor if is_speech_available() else None
def UpperCamelCase ( self ):
_snake_case : int = WhisperFeatureExtractionTester(self )
def UpperCamelCase ( self ):
_snake_case : int = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_snake_case : int = feat_extract_first.save_pretrained(lowercase_ )[0]
check_json_file_has_correct_format(lowercase_ )
_snake_case : Any = self.feature_extraction_class.from_pretrained(lowercase_ )
_snake_case : str = feat_extract_first.to_dict()
_snake_case : Any = feat_extract_second.to_dict()
_snake_case : List[str] = feat_extract_first.mel_filters
_snake_case : Any = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowercase_ , lowercase_ ) )
self.assertEqual(lowercase_ , lowercase_ )
def UpperCamelCase ( self ):
_snake_case : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_snake_case : Dict = os.path.join(lowercase_ , "feat_extract.json" )
feat_extract_first.to_json_file(lowercase_ )
_snake_case : str = self.feature_extraction_class.from_json_file(lowercase_ )
_snake_case : Optional[int] = feat_extract_first.to_dict()
_snake_case : Optional[Any] = feat_extract_second.to_dict()
_snake_case : Optional[int] = feat_extract_first.mel_filters
_snake_case : Optional[int] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowercase_ , lowercase_ ) )
self.assertEqual(lowercase_ , lowercase_ )
def UpperCamelCase ( self ):
# Test that all calls wrap to encode_plus and batch_encode_plus
_snake_case : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_snake_case : str = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
_snake_case : int = [np.asarray(lowercase_ ) for speech_input in speech_inputs]
# Test feature size
_snake_case : Any = feature_extractor(lowercase_ , padding="max_length" , return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_snake_case : Union[str, Any] = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_features
_snake_case : Tuple = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_features
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-3 ) )
# Test batched
_snake_case : int = feature_extractor(lowercase_ , return_tensors="np" ).input_features
_snake_case : Any = feature_extractor(lowercase_ , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(lowercase_ , lowercase_ ):
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_snake_case : Any = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_snake_case : Any = np.asarray(lowercase_ )
_snake_case : Any = feature_extractor(lowercase_ , return_tensors="np" ).input_features
_snake_case : Dict = feature_extractor(lowercase_ , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(lowercase_ , lowercase_ ):
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-3 ) )
# Test truncation required
_snake_case : List[str] = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
_snake_case : str = [np.asarray(lowercase_ ) for speech_input in speech_inputs]
_snake_case : Tuple = [x[: feature_extractor.n_samples] for x in speech_inputs]
_snake_case : Dict = [np.asarray(lowercase_ ) for speech_input in speech_inputs_truncated]
_snake_case : Tuple = feature_extractor(lowercase_ , return_tensors="np" ).input_features
_snake_case : Optional[Any] = feature_extractor(lowercase_ , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(lowercase_ , lowercase_ ):
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-3 ) )
def UpperCamelCase ( self ):
import torch
_snake_case : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_snake_case : List[Any] = np.random.rand(100 , 32 ).astype(np.floataa )
_snake_case : Optional[Any] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_snake_case : Dict = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
_snake_case : int = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def UpperCamelCase ( self , lowercase_ ):
_snake_case : int = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
_snake_case : Any = ds.sort("id" ).select(range(lowercase_ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def UpperCamelCase ( self ):
# fmt: off
_snake_case : Optional[int] = torch.tensor(
[
0.1_193, -0.0_946, -0.1_098, -0.0_196, 0.0_225, -0.0_690, -0.1_736, 0.0_951,
0.0_971, -0.0_817, -0.0_702, 0.0_162, 0.0_260, 0.0_017, -0.0_192, -0.1_678,
0.0_709, -0.1_867, -0.0_655, -0.0_274, -0.0_234, -0.1_884, -0.0_516, -0.0_554,
-0.0_274, -0.1_425, -0.1_423, 0.0_837, 0.0_377, -0.0_854
] )
# fmt: on
_snake_case : Tuple = self._load_datasamples(1 )
_snake_case : Dict = WhisperFeatureExtractor()
_snake_case : int = feature_extractor(lowercase_ , return_tensors="pt" ).input_features
self.assertEqual(input_features.shape , (1, 80, 3_000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , lowercase_ , atol=1e-4 ) )
def UpperCamelCase ( self ):
_snake_case : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_snake_case : Optional[Any] = self._load_datasamples(1 )[0]
_snake_case : Union[str, Any] = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535 # Rescale to [0, 65535] to show issue
_snake_case : List[Any] = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=lowercase_ )[0]
self.assertTrue(np.all(np.mean(lowercase_ ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowercase_ ) - 1 ) < 1e-3 ) )
| 580 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase : Optional[int] = {'''configuration_plbart''': ['''PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PLBartConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Any = ['''PLBartTokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Optional[Any] = [
'''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PLBartForCausalLM''',
'''PLBartForConditionalGeneration''',
'''PLBartForSequenceClassification''',
'''PLBartModel''',
'''PLBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 107 |
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
_UpperCAmelCase : Optional[int] = '''\
Text data.
Second line of data.'''
_UpperCAmelCase : Optional[Any] = '''file'''
@pytest.fixture(scope='session' )
def _SCREAMING_SNAKE_CASE ( __snake_case : Any ):
_A = tmp_path_factory.mktemp('data' ) / (FILE_PATH + '.zstd')
_A = bytes(__snake_case , 'utf-8' )
with zstd.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture
def _SCREAMING_SNAKE_CASE ( __snake_case : Union[str, Any] ):
with open(os.path.join(tmpfs.local_root_dir , __snake_case ) , 'w' ) as f:
f.write(__snake_case )
return FILE_PATH
@pytest.mark.parametrize('compression_format' , ['gzip', 'xz', 'zstd'] )
def _SCREAMING_SNAKE_CASE ( __snake_case : Dict , __snake_case : str , __snake_case : Any , __snake_case : Any , __snake_case : List[str] , __snake_case : List[str] ):
_A = {'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_path}
_A = input_paths[compression_format]
_A = tmp_path / 'cache'
_A = DownloadConfig(cache_dir=__snake_case , extract_compressed_file=__snake_case )
_A = cached_path(__snake_case , download_config=__snake_case )
with open(__snake_case ) as f:
_A = f.read()
with open(__snake_case ) as f:
_A = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('default_extracted' , [True, False] )
@pytest.mark.parametrize('default_cache_dir' , [True, False] )
def _SCREAMING_SNAKE_CASE ( __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : Union[str, Any] , __snake_case : int , __snake_case : List[str] ):
_A = 'custom_cache'
_A = 'custom_extracted_dir'
_A = tmp_path / 'custom_extracted_path'
if default_extracted:
_A = ('downloads' if default_cache_dir else custom_cache_dir, 'extracted')
else:
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_DIR' , __snake_case )
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(__snake_case ) )
_A = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
_A = xz_file
_A = (
DownloadConfig(extract_compressed_file=__snake_case )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=__snake_case )
)
_A = cached_path(__snake_case , download_config=__snake_case )
assert Path(__snake_case ).parent.parts[-2:] == expected
def _SCREAMING_SNAKE_CASE ( __snake_case : str ):
# absolute path
_A = str(Path(__snake_case ).resolve() )
assert cached_path(__snake_case ) == text_file
# relative path
_A = str(Path(__snake_case ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(__snake_case ) == text_file
def _SCREAMING_SNAKE_CASE ( __snake_case : List[str] ):
# absolute path
_A = str(tmp_path.resolve() / '__missing_file__.txt' )
with pytest.raises(__snake_case ):
cached_path(__snake_case )
# relative path
_A = './__missing_file__.txt'
with pytest.raises(__snake_case ):
cached_path(__snake_case )
def _SCREAMING_SNAKE_CASE ( __snake_case : int ):
_A = get_from_cache(F'tmp://{tmpfs_file}' )
with open(__snake_case ) as f:
_A = f.read()
assert output_file_content == FILE_CONTENT
@patch('datasets.config.HF_DATASETS_OFFLINE' , __snake_case )
def _SCREAMING_SNAKE_CASE ( ):
with pytest.raises(__snake_case ):
cached_path('https://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , __snake_case )
def _SCREAMING_SNAKE_CASE ( __snake_case : Tuple ):
_A = tmp_path_factory.mktemp('data' ) / 'file.html'
with pytest.raises(__snake_case ):
http_get('https://huggingface.co' , temp_file=__snake_case )
with pytest.raises(__snake_case ):
http_head('https://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , __snake_case )
def _SCREAMING_SNAKE_CASE ( __snake_case : Optional[int] ):
_A = tmp_path_factory.mktemp('data' ) / 'file.html'
with pytest.raises(__snake_case ):
ftp_get('ftp://huggingface.co' , temp_file=__snake_case )
with pytest.raises(__snake_case ):
ftp_head('ftp://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , __snake_case )
def _SCREAMING_SNAKE_CASE ( __snake_case : List[str] ):
_A = tmp_path_factory.mktemp('data' ) / 'file.html'
with pytest.raises(__snake_case ):
fsspec_get('s3://huggingface.co' , temp_file=__snake_case )
with pytest.raises(__snake_case ):
fsspec_head('s3://huggingface.co' )
| 107 | 1 |
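# Recursive exponentiation: base ** exponent = base * base ** (exponent - 1), with base ** 0 = 1.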
def lowercase ( _a ,_a ) -> float:
return base * power(_a ,(exponent - 1) ) if exponent else 1
if __name__ == "__main__":
print("""Raise base to the power of exponent using recursion...""")
_lowerCAmelCase = int(input("""Enter the base: """).strip())
_lowerCAmelCase = int(input("""Enter the exponent: """).strip())
_lowerCAmelCase = power(base, abs(exponent))
if exponent < 0: # power() does not properly deal w/ negative exponents
_lowerCAmelCase = 1 / result
print(F"""{base} to the power of {exponent} is {result}""") | 306 |
_lowerCAmelCase = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
_lowerCAmelCase = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def lowercase ( _a ,_a ,_a ) -> list[int]:
UpperCAmelCase_: Tuple = True
UpperCAmelCase_: Optional[int] = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(_a ,_a ,_a )
order.append(_a )
return order
def lowercase ( _a ,_a ,_a ) -> list[int]:
UpperCAmelCase_: Optional[int] = True
UpperCAmelCase_: str = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(_a ,_a ,_a )
return component
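# Kosaraju's algorithm: first record vertices in order of DFS completion on the original
# graph, then run DFS on the reversed graph in decreasing finish order; each tree found in
# the second pass is one strongly connected component.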
def lowercase ( _a ) -> list[list[int]]:
UpperCAmelCase_: Union[str, Any] = len(_a ) * [False]
UpperCAmelCase_: dict[int, list[int]] = {vert: [] for vert in range(len(_a ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(_a )
UpperCAmelCase_: Optional[int] = []
for i, was_visited in enumerate(_a ):
if not was_visited:
order += topology_sort(_a ,_a ,_a )
UpperCAmelCase_: Optional[Any] = []
UpperCAmelCase_: Union[str, Any] = len(_a ) * [False]
for i in range(len(_a ) ):
UpperCAmelCase_: str = order[len(_a ) - i - 1]
if not visited[vert]:
UpperCAmelCase_: List[str] = find_components(_a ,_a ,_a )
components_list.append(_a )
return components_list
| 306 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , unittest.TestCase ):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@property
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = ort.SessionOptions()
lowercase_ = False
return options
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""")
lowercase_ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""")
lowercase_ = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , safety_checker=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCAmelCase_)
lowercase_ = """A red cat sitting on a park bench"""
lowercase_ = np.random.RandomState(0)
lowercase_ = pipe(
prompt=lowerCAmelCase_ , image=lowerCAmelCase_ , mask_image=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=1_0 , generator=lowerCAmelCase_ , output_type="""np""" , )
lowercase_ = output.images
lowercase_ = images[0, 2_5_5:2_5_8, 2_5_5:2_5_8, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
lowercase_ = np.array([0.2_514, 0.3_007, 0.3_517, 0.1_790, 0.2_382, 0.3_167, 0.1_944, 0.2_273, 0.2_464])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""")
lowercase_ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""")
lowercase_ = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , subfolder="""scheduler""" , revision="""onnx""")
lowercase_ = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , scheduler=lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCAmelCase_)
lowercase_ = """A red cat sitting on a park bench"""
lowercase_ = np.random.RandomState(0)
lowercase_ = pipe(
prompt=lowerCAmelCase_ , image=lowerCAmelCase_ , mask_image=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=2_0 , generator=lowerCAmelCase_ , output_type="""np""" , )
lowercase_ = output.images
lowercase_ = images[0, 2_5_5:2_5_8, 2_5_5:2_5_8, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
lowercase_ = np.array([0.0_086, 0.0_077, 0.0_083, 0.0_093, 0.0_107, 0.0_139, 0.0_094, 0.0_097, 0.0_125])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
| 567 |
"""simple docstring"""
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
UpperCAmelCase : List[Any] = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
def __init__( self : Dict , lowerCAmelCase_ : Union[str, Any]):
"""simple docstring"""
super().__init__()
lowercase_ = torchvision.models.resnetaaa(pretrained=lowerCAmelCase_)
lowercase_ = list(model.children())[:-2]
lowercase_ = nn.Sequential(*lowerCAmelCase_)
lowercase_ = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds])
def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase_ : int):
"""simple docstring"""
lowercase_ = self.pool(self.model(lowerCAmelCase_))
lowercase_ = torch.flatten(lowerCAmelCase_ , start_dim=2)
lowercase_ = out.transpose(1 , 2).contiguous()
return out # BxNx2048
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
def __init__( self : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str]):
"""simple docstring"""
lowercase_ = [json.loads(lowerCAmelCase_) for l in open(lowerCAmelCase_)]
lowercase_ = os.path.dirname(lowerCAmelCase_)
lowercase_ = tokenizer
lowercase_ = labels
lowercase_ = len(lowerCAmelCase_)
lowercase_ = max_seq_length
lowercase_ = transforms
def __len__( self : List[Any]):
"""simple docstring"""
return len(self.data)
def __getitem__( self : Tuple , lowerCAmelCase_ : Union[str, Any]):
"""simple docstring"""
lowercase_ = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] , add_special_tokens=lowerCAmelCase_))
lowercase_ , lowercase_ , lowercase_ = sentence[0], sentence[1:-1], sentence[-1]
lowercase_ = sentence[: self.max_seq_length]
lowercase_ = torch.zeros(self.n_classes)
lowercase_ = 1
lowercase_ = Image.open(os.path.join(self.data_dir , self.data[index]["""img"""])).convert("""RGB""")
lowercase_ = self.transforms(lowerCAmelCase_)
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
lowercase_ = Counter()
for row in self.data:
label_freqs.update(row["""label"""])
return label_freqs
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Tuple:
'''simple docstring'''
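# Pad every tokenized sentence in the batch to the longest length and build the matching
# attention mask, then stack the image tensors, labels and image boundary tokens.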
lowercase_ = [len(row["""sentence"""] ) for row in batch]
lowercase_ , lowercase_ = len(__lowerCAmelCase ), max(__lowerCAmelCase )
lowercase_ = torch.zeros(__lowerCAmelCase , __lowerCAmelCase , dtype=torch.long )
lowercase_ = torch.zeros(__lowerCAmelCase , __lowerCAmelCase , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(__lowerCAmelCase , __lowerCAmelCase ) ):
lowercase_ = input_row["""sentence"""]
lowercase_ = 1
lowercase_ = torch.stack([row["""image"""] for row in batch] )
lowercase_ = torch.stack([row["""label"""] for row in batch] )
lowercase_ = torch.stack([row["""image_start_token"""] for row in batch] )
lowercase_ = torch.stack([row["""image_end_token"""] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def _SCREAMING_SNAKE_CASE () -> Dict:
'''simple docstring'''
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def _SCREAMING_SNAKE_CASE () -> List[Any]:
'''simple docstring'''
return transforms.Compose(
[
transforms.Resize(2_56 ),
transforms.CenterCrop(2_24 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ),
] )
| 567 | 1 |
'''simple docstring'''
import numpy as np
import datasets
a = '\nCompute the Mahalanobis Distance\n\nMahalanobis distance is the distance between a point and a distribution,\nnot between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since.\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
a = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
a = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase__ ( datasets.Metric ):
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""X""": datasets.Sequence(datasets.Value("""float""" , id="""sequence""" ) , id="""X""" ),
} ) , )
def UpperCAmelCase__ ( self : Any , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
lowercase_ = np.array(UpperCamelCase__ )
lowercase_ = np.array(UpperCamelCase__ )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError("""Expected `X` to be a 2D vector""" )
if len(reference_distribution.shape ) != 2:
raise ValueError("""Expected `reference_distribution` to be a 2D vector""" )
if reference_distribution.shape[0] < 2:
raise ValueError(
"""Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension""" )
# Get mahalanobis distance for each prediction
lowercase_ = X - np.mean(UpperCamelCase__ )
lowercase_ = np.cov(reference_distribution.T )
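# Invert the covariance of the reference distribution; fall back to the Moore-Penrose
# pseudo-inverse when the matrix is singular.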
try:
lowercase_ = np.linalg.inv(UpperCamelCase__ )
except np.linalg.LinAlgError:
lowercase_ = np.linalg.pinv(UpperCamelCase__ )
lowercase_ = np.dot(UpperCamelCase__ , UpperCamelCase__ )
lowercase_ = np.dot(UpperCamelCase__ , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist}
| 721 |
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
a = None
a = logging.get_logger(__name__)
a = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
a = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
a = {
't5-small': 5_1_2,
't5-base': 5_1_2,
't5-large': 5_1_2,
't5-3b': 5_1_2,
't5-11b': 5_1_2,
}
class UpperCamelCase__ ( __magic_name__ ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : int = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : str = ['input_ids', 'attention_mask']
__SCREAMING_SNAKE_CASE : Dict = TaTokenizer
__SCREAMING_SNAKE_CASE : List[int] = []
def __init__( self : int , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Any=None , UpperCamelCase__ : Dict="</s>" , UpperCamelCase__ : Tuple="<unk>" , UpperCamelCase__ : Optional[Any]="<pad>" , UpperCamelCase__ : Union[str, Any]=100 , UpperCamelCase__ : Optional[Any]=None , **UpperCamelCase__ : List[str] , ):
'''simple docstring'''
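# T5 reserves `extra_ids` sentinel tokens (<extra_id_0> ... <extra_id_{extra_ids-1}>) for
# span-corruption pre-training; they are exposed as additional special tokens.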
if extra_ids > 0 and additional_special_tokens is None:
lowercase_ = [F'''<extra_id_{i}>''' for i in range(UpperCamelCase__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
lowercase_ = len(set(filter(lambda UpperCamelCase__ : bool("""extra_id_""" in str(UpperCamelCase__ ) ) , UpperCamelCase__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
""" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"""
""" tokens""" )
super().__init__(
UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , extra_ids=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , **UpperCamelCase__ , )
lowercase_ = vocab_file
lowercase_ = False if not self.vocab_file else True
lowercase_ = extra_ids
    @staticmethod
    def _eventually_correct_t5_max_length( pretrained_model_name_or_path , max_model_length , init_max_model_length ):
        '''simple docstring'''
        if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    """This tokenizer was incorrectly instantiated with a model max length of"""
                    f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
                    """ behavior is kept to avoid breaking backwards compatibility when padding/encoding with"""
                    """ `truncation is True`.\n- Be aware that you SHOULD NOT rely on"""
                    f''' {pretrained_model_name_or_path} automatically truncating your input to'''
                    f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
                    f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
                    """ `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"""
                    """ instantiate this tokenizer with `model_max_length` set to your preferred value.""" , FutureWarning , )
        return max_model_length
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ):
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""" )
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
            logger.info(f'''Copy vocab file to {out_vocab_file}''' )
        return (out_vocab_file,)
    def build_inputs_with_special_tokens( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
        '''simple docstring'''
        token_ids_a = token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a
        else:
            token_ids_b = token_ids_b + [self.eos_token_id]
            return self.prefix_tokens + token_ids_a + token_ids_b
    def create_token_type_ids_from_sequences( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
        '''simple docstring'''
        eos = [self.eos_token_id]
        if token_ids_b is None:
            return len(token_ids_a + eos ) * [0]
        return len(token_ids_a + eos + token_ids_b + eos ) * [0]
    def get_sentinel_tokens( self ):
        '''simple docstring'''
        return list(
            set(filter(lambda token: bool(re.search(R"""<extra_id_\d+>""" , token ) ) is not None , self.additional_special_tokens ) ) )
    def get_sentinel_token_ids( self ):
        '''simple docstring'''
        return [self.convert_tokens_to_ids(token ) for token in self.get_sentinel_tokens()]
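# A minimal usage sketch for the class above (hedged: the checkpoint name is one
# of the entries in the maps above, but the exact token output is illustrative):
#
#   tokenizer = TaTokenizerFast.from_pretrained("t5-small")
#   batch = tokenizer(["translate English to German: hello"], return_tensors="np")
#   batch["input_ids"]  # ids for the text plus the appended </s> (EOS) token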
| 650 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"YituTech/conv-bert-base": 512,
"YituTech/conv-bert-medium-small": 512,
"YituTech/conv-bert-small": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"YituTech/conv-bert-base": {"do_lower_case": True},
"YituTech/conv-bert-medium-small": {"do_lower_case": True},
"YituTech/conv-bert-small": {"do_lower_case": True},
}
class ConvBertTokenizerFast( PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("""lowercase""" , do_lower_case) != do_lower_case
            or normalizer_state.get("""strip_accents""" , strip_accents) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" , tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("""type"""))
            normalizer_state["""lowercase"""] = do_lower_case
            normalizer_state["""strip_accents"""] = strip_accents
            normalizer_state["""handle_chinese_chars"""] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None):
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix)
        return tuple(files)
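# A minimal usage sketch (hedged: requires hub access, and the wrapping shown in
# the comment is illustrative rather than taken from a live run):
#
#   tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#   tokenizer("hello world").input_ids  # ids wrapped as [CLS] ... [SEP]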
| 155 |
"""simple docstring"""
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
    def setUp( self):
super().setUp()
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""こんにちは""",
"""こん""",
"""にちは""",
"""ばんは""",
"""##こん""",
"""##にちは""",
"""##ばんは""",
"""世界""",
"""##世界""",
"""、""",
"""##、""",
"""。""",
"""##。""",
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens]))
    def get_input_output_texts( self , tokenizer):
        input_text = """こんにちは、世界。 \nこんばんは、世界。"""
        output_text = """こんにちは 、 世界 。 こんばんは 、 世界 。"""
        return input_text, output_text
    def get_clean_sequence( self , tokenizer):
        input_text , output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text , add_special_tokens=False)
        text = tokenizer.decode(ids , clean_up_tokenization_spaces=False)
        return text, ids
    def test_pretokenized_inputs( self):
        pass  # TODO add if relevant
    def test_maximum_encoding_length_pair_input( self):
        pass  # TODO add if relevant
    def test_maximum_encoding_length_single_input( self):
        pass  # TODO add if relevant
    def test_full_tokenizer( self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("""こんにちは、世界。\nこんばんは、世界。""")
        self.assertListEqual(tokens , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens) , [3, 12, 10, 14, 4, 9, 12, 10, 14])
    def test_pickle_mecab_tokenizer( self):
        tokenizer = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""mecab""")
        self.assertIsNotNone(tokenizer)
        text = """こんにちは、世界。\nこんばんは、世界。"""
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens) , [3, 12, 10, 14, 4, 9, 12, 10, 14])
        filename = os.path.join(self.tmpdirname , """tokenizer.bin""")
        with open(filename , """wb""") as handle:
            pickle.dump(tokenizer , handle)
        with open(filename , """rb""") as handle:
            tokenizer_new = pickle.load(handle)
        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens , tokens_loaded)
    def test_mecab_tokenizer( self):
        tokenizer = MecabTokenizer(mecab_dic="""ipadic""")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
    def test_mecab_tokenizer_unidic_lite( self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="""unidic_lite""")
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
    def test_mecab_tokenizer_unidic( self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="""unidic""")
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
    def test_mecab_tokenizer_lower( self):
        tokenizer = MecabTokenizer(do_lower_case=True , mecab_dic="""ipadic""")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップルストア""", """で""", """iphone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
    def test_mecab_tokenizer_with_option( self):
        try:
            # The boolean flags are inferred from the expected output below:
            # "iPhone" is not lowercased and "\u3000" is retained.
            tokenizer = MecabTokenizer(
                do_lower_case=False , normalize_text=False , mecab_option="""-d /usr/local/lib/mecab/dic/jumandic""")
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
    def test_mecab_tokenizer_no_normalize( self):
        tokenizer = MecabTokenizer(normalize_text=False , mecab_dic="""ipadic""")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """ """, """。"""] , )
@require_sudachi
    def test_pickle_sudachi_tokenizer( self):
        tokenizer = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""sudachi""")
        self.assertIsNotNone(tokenizer)
        text = """こんにちは、世界。\nこんばんは、世界。"""
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens) , [3, 12, 10, 14, 4, 9, 12, 10, 14])
        filename = os.path.join(self.tmpdirname , """tokenizer.bin""")
        with open(filename , """wb""") as handle:
            pickle.dump(tokenizer , handle)
        with open(filename , """rb""") as handle:
            tokenizer_new = pickle.load(handle)
        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens , tokens_loaded)
@require_sudachi
    def test_sudachi_tokenizer_core( self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="""core""")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )
@require_sudachi
    def test_sudachi_tokenizer_split_mode_A( self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""A""")
self.assertListEqual(tokenizer.tokenize("""外国人参政権""") , ["""外国""", """人""", """参政""", """権"""])
@require_sudachi
    def test_sudachi_tokenizer_split_mode_B( self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""B""")
self.assertListEqual(tokenizer.tokenize("""外国人参政権""") , ["""外国人""", """参政権"""])
@require_sudachi
    def test_sudachi_tokenizer_split_mode_C( self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""C""")
self.assertListEqual(tokenizer.tokenize("""外国人参政権""") , ["""外国人参政権"""])
@require_sudachi
    def test_sudachi_tokenizer_lower( self):
        tokenizer = SudachiTokenizer(do_lower_case=True , sudachi_dict_type="""core""")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iphone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )
@require_sudachi
    def test_sudachi_tokenizer_no_normalize( self):
        tokenizer = SudachiTokenizer(normalize_text=False , sudachi_dict_type="""core""")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """\u3000""", """。""", """ """, """ """] , )
@require_sudachi
    def test_sudachi_tokenizer_trim_whitespace( self):
        tokenizer = SudachiTokenizer(trim_whitespace=True , sudachi_dict_type="""core""")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
@require_jumanpp
    def test_pickle_jumanpp_tokenizer( self):
        tokenizer = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""jumanpp""")
        self.assertIsNotNone(tokenizer)
        text = """こんにちは、世界。\nこんばんは、世界。"""
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens) , [3, 12, 10, 14, 4, 9, 12, 10, 14])
        filename = os.path.join(self.tmpdirname , """tokenizer.bin""")
        with open(filename , """wb""") as handle:
            pickle.dump(tokenizer , handle)
        with open(filename , """rb""") as handle:
            tokenizer_new = pickle.load(handle)
        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens , tokens_loaded)
@require_jumanpp
    def test_jumanpp_tokenizer( self):
        tokenizer = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
    def test_jumanpp_tokenizer_lower( self):
        tokenizer = JumanppTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップル""", """ストア""", """で""", """iphone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
    def test_jumanpp_tokenizer_no_normalize( self):
        tokenizer = JumanppTokenizer(normalize_text=False)
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""ア""", """ッ""", """フ""", """゚""", """ル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
    def test_jumanpp_tokenizer_trim_whitespace( self):
        tokenizer = JumanppTokenizer(trim_whitespace=True)
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """。"""] , )
@require_jumanpp
    def test_jumanpp_tokenizer_ext( self):
        tokenizer = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("""ありがとうございますm(_ _)m見つけるのが大変です。""") , ["""ありがとう""", """ございます""", """m(_ _)m""", """見つける""", """の""", """が""", """大変です""", """。"""] , )
    def test_wordpiece_tokenizer( self):
        vocab_tokens = ["""[UNK]""", """[CLS]""", """[SEP]""", """こんにちは""", """こん""", """にちは""", """ばんは""", """##こん""", """##にちは""", """##ばんは"""]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab , unk_token="""[UNK]""")
        self.assertListEqual(tokenizer.tokenize("""""") , [])
        self.assertListEqual(tokenizer.tokenize("""こんにちは""") , ["""こんにちは"""])
        self.assertListEqual(tokenizer.tokenize("""こんばんは""") , ["""こん""", """##ばんは"""])
        self.assertListEqual(tokenizer.tokenize("""こんばんは こんばんにちは こんにちは""") , ["""こん""", """##ばんは""", """[UNK]""", """こんにちは"""])
    def test_sentencepiece_tokenizer( self):
        tokenizer = BertJapaneseTokenizer.from_pretrained("""nlp-waseda/roberta-base-japanese-with-auto-jumanpp""")
        subword_tokenizer = tokenizer.subword_tokenizer
        tokens = subword_tokenizer.tokenize("""国境 の 長い トンネル を 抜ける と 雪国 であった 。""")
        self.assertListEqual(tokens , ["""▁国境""", """▁の""", """▁長い""", """▁トンネル""", """▁を""", """▁抜ける""", """▁と""", """▁雪""", """国""", """▁であった""", """▁。"""])
        tokens = subword_tokenizer.tokenize("""こんばんは こんばん にち は こんにちは""")
        self.assertListEqual(tokens , ["""▁こん""", """ばん""", """は""", """▁こん""", """ばん""", """▁に""", """ち""", """▁は""", """▁こんにちは"""])
    def test_sequence_builders( self):
        tokenizer = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese""")
        text = tokenizer.encode("""ありがとう。""" , add_special_tokens=False)
        text_a = tokenizer.encode("""どういたしまして。""" , add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a)
        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    def setUp( self):
super().setUp()
        vocab_tokens = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens]))
    def get_tokenizer( self , **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="""character""" , **kwargs)
    def get_input_output_texts( self , tokenizer):
        input_text = """こんにちは、世界。 \nこんばんは、世界。"""
        output_text = """こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"""
        return input_text, output_text
    def test_pretokenized_inputs( self):
        pass  # TODO add if relevant
    def test_maximum_encoding_length_pair_input( self):
        pass  # TODO add if relevant
    def test_maximum_encoding_length_single_input( self):
        pass  # TODO add if relevant
    def test_full_tokenizer( self):
        tokenizer = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="""character""")
        tokens = tokenizer.tokenize("""こんにちは、世界。 \nこんばんは、世界。""")
        self.assertListEqual(
            tokens , ["""こ""", """ん""", """に""", """ち""", """は""", """、""", """世""", """界""", """。""", """こ""", """ん""", """ば""", """ん""", """は""", """、""", """世""", """界""", """。"""])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12])
    def test_character_tokenizer( self):
        vocab_tokens = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab , unk_token="""[UNK]""")
        self.assertListEqual(tokenizer.tokenize("""""") , [])
        self.assertListEqual(tokenizer.tokenize("""こんにちは""") , ["""こ""", """ん""", """に""", """ち""", """は"""])
        self.assertListEqual(tokenizer.tokenize("""こんにちほ""") , ["""こ""", """ん""", """に""", """ち""", """[UNK]"""])
    def test_sequence_builders( self):
        tokenizer = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese-char""")
        text = tokenizer.encode("""ありがとう。""" , add_special_tokens=False)
        text_a = tokenizer.encode("""どういたしまして。""" , add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a)
        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class AutoTokenizerCustomTest( unittest.TestCase ):
    """simple docstring"""
    def test_tokenizer_bert_japanese( self):
        EXAMPLE_BERT_JAPANESE_ID = """cl-tohoku/bert-base-japanese"""
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer , BertJapaneseTokenizer)
class BertTokenizerMismatchTest( unittest.TestCase ):
    """simple docstring"""
    def test_tokenizer_mismatch_warning( self):
        EXAMPLE_BERT_JAPANESE_ID = """cl-tohoku/bert-base-japanese"""
        with self.assertLogs("""transformers""" , level="""WARNING""") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    """The tokenizer class you load from this checkpoint is not the same type as the class this function"""
                    """ is called from."""))
        EXAMPLE_BERT_ID = """bert-base-cased"""
        with self.assertLogs("""transformers""" , level="""WARNING""") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    """The tokenizer class you load from this checkpoint is not the same type as the class this function"""
                    """ is called from."""))
| 155 | 1 |
'''simple docstring'''
import doctest
from collections import deque
import numpy as np
class CircularConvolution :
    """simple docstring"""
    def __init__( self ) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]
    def circular_convolution( self ) -> list[float]:
        length_first_signal = len(self.first_signal )
        length_second_signal = len(self.second_signal )
        max_length = max(length_first_signal , length_second_signal )
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length )]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length ):
            rotated_signal = deque(self.second_signal )
            rotated_signal.rotate(i )
            for j, item in enumerate(rotated_signal ):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix ) , np.transpose(self.first_signal ) )
        # rounding-off to two decimal places
        return [round(i , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
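# Worked example for the default signals above (computed by hand, not taken from
# the source): the circular convolution of [2, 1, 2, -1] with [1, 2, 3, 4] is
# [10, 10, 6, 14]; e.g. y[0] = 2*1 + 1*4 + 2*3 + (-1)*2 = 10.
#
#   convolver = CircularConvolution()
#   print(convolver.circular_convolution())  # expected: [10, 10, 6, 14]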
| 700 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_jukebox': [
'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP',
'JukeboxConfig',
'JukeboxPriorConfig',
'JukeboxVQVAEConfig',
],
'tokenization_jukebox': ['JukeboxTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_jukebox'] = [
'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'JukeboxModel',
'JukeboxPreTrainedModel',
'JukeboxVQVAE',
'JukeboxPrior',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 83 | 0 |
deps = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
| 85 |
def combination_sum_iv( n: int , array: list[int] , target: int ) -> int:
    def count_of_possible_combinations( target: int ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item ) for item in array )
    return count_of_possible_combinations(target )
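# Note: the plain recursion above enumerates every ordered composition of `target`
# from `array`, so it runs in exponential time. The two variants below share the
# same recurrence, dp[t] = sum(dp[t - item] for item in array) with dp[0] = 1,
# but memoize it (top-down) or tabulate it (bottom-up) for O(target * n) work.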
def combination_sum_iv_dp_array( n: int , array: list[int] , target: int ) -> int:
    def count_of_possible_combinations_with_dp_array(
        target: int , dp_array: list[int] ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item , dp_array )
            for item in array )
        dp_array[target] = answer
        return answer
    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target , dp_array )
def combination_sum_iv_bottom_up( n: int , array: list[int] , target: int ) -> int:
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1 , target + 1 ):
        for j in range(n ):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
    _UpperCAmelCase = n = 3
    _UpperCAmelCase = target = 5
    _UpperCAmelCase = array = [1, 2, 5]
print(combination_sum_iv(n, array, target)) | 504 | 0 |
'''simple docstring'''
def snake_to_camel_case(input_str : str , use_pascal : bool = False ):
    '''simple docstring'''
    if not isinstance(input_str , str ):
        msg = f'''Expected string as input, found {type(input_str )}'''
        raise ValueError(msg )
    if not isinstance(use_pascal , bool ):
        msg = f'''Expected boolean as use_pascal parameter, found {type(use_pascal )}'''
        raise ValueError(msg )
    words = input_str.split("_" )
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 489 |
'''simple docstring'''
from math import isqrt, log2
def calculate_prime_numbers(max_number : int ):
    '''simple docstring'''
    is_prime = [True] * max_number
    for i in range(2 , isqrt(max_number - 1 ) + 1 ):
        if is_prime[i]:
            for j in range(i**2 , max_number , i ):
                is_prime[j] = False
    return [i for i in range(2 , max_number ) if is_prime[i]]
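# Sanity check for the sieve above (computed by hand, not from the source):
#
#   >>> calculate_prime_numbers(20)
#   [2, 3, 5, 7, 11, 13, 17, 19]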
def solution(base : int = 800800 , degree : int = 800800 ):
    '''simple docstring'''
    upper_bound = degree * log2(base )
    max_prime = int(upper_bound )
    prime_numbers = calculate_prime_numbers(max_prime )
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers ) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left] )
            + prime_numbers[left] * log2(prime_numbers[right] )
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count
if __name__ == "__main__":
print(f'''{solution() = }''')
| 489 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict( config , input_ids , attention_mask=None , head_mask=None ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester :
    """simple docstring"""
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = """gelu"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=16 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , embed_dim=16 , word_embed_proj_dim=16 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False
    def prepare_config_and_inputs_for_common( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        config = self.config_cls(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=False , **self.config_updates , )
        inputs_dict = prepare_opt_inputs_dict(config , input_ids )
        return config, inputs_dict
    def check_decoder_model_past_large_inputs( self , config , inputs_dict ):
        model = TFOPTModel(config=config )
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , use_cache=True )
        output , past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice , output_from_no_past_slice , rtol=1e-3 )
@require_tf
class TFOPTModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": TFOPTModel, """text-generation""": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10
    def setUp( self ):
        self.model_tester = TFOPTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=OPTConfig )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
    def test_resize_token_embeddings( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        def _get_word_embedding_weight(model , embedding_layer ):
            if hasattr(embedding_layer , "weight" ):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer , "weight" ):
                    return embedding_layer.weight
                else:
                    return None
        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config )
                old_input_embeddings = _get_word_embedding_weight(model , model.get_input_embeddings() )
                old_output_embeddings = _get_word_embedding_weight(model , model.get_output_embeddings() )
                # reshape the embeddings
                model.resize_token_embeddings(size )
                new_input_embeddings = _get_word_embedding_weight(model , model.get_input_embeddings() )
                new_output_embeddings = _get_word_embedding_weight(model , model.get_output_embeddings() )
                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0] , assert_size )
                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2 ) ) > 0:
                        models_equal = False
                self.assertTrue(models_equal )
                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0] , assert_size )
                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2 ) ) > 0:
                            models_equal = False
                    self.assertTrue(models_equal )
def _long_tensor( tok_lst ):
    return tf.constant(tok_lst , dtype=tf.int32 )
@require_tf
class TFOPTHeadTests( unittest.TestCase ):
    """simple docstring"""
    vocab_size = 99
    def _get_config_and_data( self ):
        eos_column_vector = tf.ones((4, 1) , dtype=tf.int32 ) * 2
        input_ids = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests( unittest.TestCase ):
    """simple docstring"""
    @slow
    def test_inference_no_head( self ):
        model = TFOPTModel.from_pretrained("facebook/opt-350m" )
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
        attention_mask = tf.not_equal(input_ids , model.config.pad_token_id )
        with tf.GradientTape():
            output = model(input_ids=input_ids , attention_mask=attention_mask ).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape , expected_shape )
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]] )
        self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=4e-3 ) )
        xla_generate = tf.function(model , jit_compile=True )
        output = xla_generate(input_ids , attention_mask )[0]
        self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=4e-2 ) )
@require_tf
@slow
class TFOPTEmbeddingsTest( unittest.TestCase ):
    """simple docstring"""
    def setUp( self ):
        super().setUp()
        self.path_model = "facebook/opt-350m"
    def test_logits( self ):
        model = TFOPTForCausalLM.from_pretrained(self.path_model )
        tokenizer = GPTaTokenizer.from_pretrained(self.path_model )
        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts , return_tensors="tf" , padding=True , add_special_tokens=False )
        logits = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
        logits_meta = tf.constant(
            [
                [1.3_851, -13.8_923, -10.5_229, -10.7_533, -0.2_309, -10.2_384, -0.5_365, -9.0_947, -5.1_670],
                [-4.7_073, -10.6_276, -3.9_415, -21.5_242, -0.2_822, -0.2_822, -0.2_822, -0.2_822, -0.2_822],
                [0.6_247, -3.4_229, -8.9_179, -1.4_297, -14.1_650, 1.4_146, -9.0_218, -0.2_703, -0.2_703],
                [6.4_783, -1.9_913, -10.7_926, -2.3_336, 1.5_092, -0.9_974, -6.8_213, 1.3_477, 1.3_477],
            ] )
        self.assertTrue(np.allclose(logits , logits_meta , atol=1e-4 ) )
        xla_generate = tf.function(model , jit_compile=True )
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
        self.assertTrue(np.allclose(logits , logits_meta , atol=1e-4 ) )
@require_tf
@slow
class TFOPTGenerationTest( unittest.TestCase ):
    """simple docstring"""
    @property
    def prompts( self ):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
    def test_generation_pre_attn_layer_norm( self ):
        model_id = "facebook/opt-125m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPTaTokenizer.from_pretrained(model_id )
        model = TFOPTForCausalLM.from_pretrained(model_id )
        for prompt in self.prompts:
            input_ids = tokenizer(prompt , return_tensors="tf" ).input_ids
            generated_ids = model.generate(input_ids , max_length=10 )
            generated_string = tokenizer.batch_decode(generated_ids , skip_special_tokens=True )
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs , EXPECTED_OUTPUTS )
snake_case_ = "facebook/opt-350m"
snake_case_ = GPTaTokenizer.from_pretrained(UpperCAmelCase_ )
snake_case_ = TFOPTForCausalLM.from_pretrained(UpperCAmelCase_ )
snake_case_ = "left"
# use different length sentences to test batching
snake_case_ = [
"Hello, my dog is a little",
"Today, I",
]
snake_case_ = tokenizer(UpperCAmelCase_ , return_tensors="tf" , padding=UpperCAmelCase_ )
snake_case_ = inputs["input_ids"]
snake_case_ = model.generate(input_ids=UpperCAmelCase_ , attention_mask=inputs["attention_mask"] )
snake_case_ = tokenizer(sentences[0] , return_tensors="tf" ).input_ids
snake_case_ = model.generate(input_ids=UpperCAmelCase_ )
snake_case_ = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs["attention_mask"][-1] , tf.intaa ) )
snake_case_ = tokenizer(sentences[1] , return_tensors="tf" ).input_ids
snake_case_ = model.generate(input_ids=UpperCAmelCase_ , max_length=model.config.max_length - num_paddings )
snake_case_ = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
snake_case_ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=UpperCAmelCase_ )
snake_case_ = tokenizer.decode(output_padded[0] , skip_special_tokens=UpperCAmelCase_ )
snake_case_ = [
"Hello, my dog is a little bit of a dork.\nI'm a little bit",
"Today, I was in the middle of a conversation with a friend about the",
]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , [non_padded_sentence, padded_sentence] )
    def test_generation_post_attn_layer_norm( self ):
        model_id = "facebook/opt-350m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPTaTokenizer.from_pretrained(model_id )
        model = TFOPTForCausalLM.from_pretrained(model_id )
        for prompt in self.prompts:
            input_ids = tokenizer(prompt , return_tensors="tf" ).input_ids
            generated_ids = model.generate(input_ids , max_length=10 )
            generated_string = tokenizer.batch_decode(generated_ids , skip_special_tokens=True )
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs , EXPECTED_OUTPUTS )
| 508 |
'''simple docstring'''
import math
import sys
import cva
import numpy as np
def vec_gaussian( img: np.ndarray , variance: float ) -> np.ndarray:
    # For applying gaussian function for each element in matrix.
    sigma = math.sqrt(variance )
    cons = 1 / (sigma * math.sqrt(2 * math.pi ))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def get_slice( img: np.ndarray , x: int , y: int , kernel_size: int ) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]
def get_gauss_kernel( kernel_size: int , spatial_variance: float ) -> np.ndarray:
    # Creates a gaussian kernel of given dimension.
    arr = np.zeros((kernel_size, kernel_size) )
    for i in range(0 , kernel_size ):
        for j in range(0 , kernel_size ):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
    return vec_gaussian(arr , spatial_variance )
def bilateral_filter( img: np.ndarray , spatial_variance: float , intensity_variance: float , kernel_size: int , ) -> np.ndarray:
    imga = np.zeros(img.shape )
    gauss_ker = get_gauss_kernel(kernel_size , spatial_variance )
    size_x , size_y = img.shape
    for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
        for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
            img_s = get_slice(img , i , j , kernel_size )
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i , intensity_variance )
            weights = np.multiply(gauss_ker , img_ig )
            vals = np.multiply(img_s , weights )
            val = np.sum(vals ) / np.sum(weights )
            imga[i, j] = val
    return imga
def parse_args( args: list ) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2] ) if args[2:] else 1.0
    intensity_variance = float(args[3] ) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4] )
        kernel_size = kernel_size + abs(kernel_size % 2 - 1 )
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cva.imread(filename, 0)
    cva.imshow('''input image''', img)
    out = img / 255
    out = out.astype('''float32''')
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
cva.imshow('''output image''', out)
cva.waitKey(0)
cva.destroyAllWindows()
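# Example invocation (file path and parameters are illustrative):
#   python bilateral_filter.py ../image_data/lena.jpg 1.0 1.0 5
# i.e. <image path> <spatial variance> <intensity variance> <kernel size>;
# an even kernel size is bumped to the next odd value by parse_args above.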
| 508 | 1 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char( cp ):
'''simple docstring'''
if (
(cp >= 0x4E00 and cp <= 0x9FFF)
or (cp >= 0x3400 and cp <= 0x4DBF) #
or (cp >= 0x2_0000 and cp <= 0x2_A6DF) #
or (cp >= 0x2_A700 and cp <= 0x2_B73F) #
or (cp >= 0x2_B740 and cp <= 0x2_B81F) #
or (cp >= 0x2_B820 and cp <= 0x2_CEAF) #
or (cp >= 0xF900 and cp <= 0xFAFF)
or (cp >= 0x2_F800 and cp <= 0x2_FA1F) #
): #
return True
return False
def is_chinese( word ):
    '''simple docstring'''
    for char in word:
        cp = ord(char )
        if not _is_chinese_char(cp ):
            return 0
    return 1
def get_chinese_word( tokens ):
    '''simple docstring'''
    word_set = set()
    for token in tokens:
        chinese_word = len(token ) > 1 and is_chinese(token )
        if chinese_word:
            word_set.add(token )
    word_list = list(word_set )
    return word_list
def add_sub_symbol( bert_tokens , chinese_word_set ):
    '''simple docstring'''
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w ) for w in chinese_word_set] )
    bert_word = bert_tokens
    start , end = 0, len(bert_word )
    while start < end:
        single_word = True
        if is_chinese(bert_word[start] ):
            substr_max_len = min(end - start , max_word_len )
            for i in range(substr_max_len , 1 , -1 ):
                whole_word = "".join(bert_word[start : start + i] )
                if whole_word in chinese_word_set:
                    for j in range(start + 1 , start + i ):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref( lines , ltp_tokenizer , bert_tokenizer ):
    '''simple docstring'''
    ltp_res = []
    for i in range(0 , len(lines ) , 100 ):
        res = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["cws"] ).cws
        res = [get_chinese_word(r ) for r in res]
        ltp_res.extend(res )
    assert len(ltp_res ) == len(lines )
    bert_res = []
    for i in range(0 , len(lines ) , 100 ):
        res = bert_tokenizer(lines[i : i + 100] , add_special_tokens=True , truncation=True , max_length=512 )
        bert_res.extend(res["input_ids"] )
    assert len(bert_res ) == len(lines )
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res , ltp_res ):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id )
            input_tokens.append(token )
        input_tokens = add_sub_symbol(input_tokens , chinese_word )
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens ):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token ) == 1 and _is_chinese_char(ord(clean_token ) ):
                    ref_id.append(i )
        ref_ids.append(ref_id )
    assert len(ref_ids ) == len(bert_res )
    return ref_ids
def main( args ):
    '''simple docstring'''
    with open(args.file_name , "r" , encoding="utf-8" ) as f:
        data = f.readlines()
    lines = [line.strip() for line in data if len(line ) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp )  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert )
    ref_ids = prepare_ref(lines , ltp_tokenizer , bert_tokenizer )
    with open(args.save_path , "w" , encoding="utf-8" ) as f:
        data = [json.dumps(ref ) + "\n" for ref in ref_ids]
        f.writelines(data )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
    args = parser.parse_args()
main(args)
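# Example invocation (the script name and resource paths are illustrative; the
# argparse defaults above are used when flags are omitted):
#   python prepare_chinese_ref.py --file_name ./resources/chinese-demo.txt \
#       --ltp ./resources/ltp --bert ./resources/robert --save_path ./resources/ref.txt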
| 701 |
"""simple docstring"""
from itertools import product
def total_frequency_distribution( sides_number , dice_number ):
    '''simple docstring'''
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    face_numbers = range(min_face_number , max_face_number + 1 )
    for dice_numbers in product(face_numbers , repeat=dice_number ):
        total = sum(dice_numbers )
        totals_frequencies[total] += 1
    return totals_frequencies
def solution( ):
    '''simple docstring'''
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4 , dice_number=9 )
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6 , dice_number=6 )
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total , max_peter_total + 1 ):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total] )
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability , ndigits=7 )
    return rounded_peter_win_probability
if __name__ == "__main__":
print(f'''{solution() = }''')
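# For reference, the documented answer to Project Euler problem 205 is 0.5731441,
# which is the value solution() above is expected to return.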
| 282 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/vocab.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/vocab.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/vocab.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/vocab.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/vocab.json''',
},
'''merges_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/merges.txt''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/merges.txt''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/merges.txt''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/merges.txt''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/tokenizer.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/tokenizer.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''gpt2''': 1024,
    '''gpt2-medium''': 1024,
    '''gpt2-large''': 1024,
    '''gpt2-xl''': 1024,
    '''distilgpt2''': 1024,
}
class GPTaTokenizerFast( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPTaTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , ) ->Optional[Any]:
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        self.add_bos_token = kwargs.pop('''add_bos_token''' , False )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('''type''' ) )
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus( self , *args , **kwargs ) ->BatchEncoding:
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ) ->BatchEncoding:
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) ->Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def _build_conversation_input_ids( self , conversation : "Conversation" ) ->List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
'''simple docstring'''
import math
import tensorflow as tf
from packaging import version
def _gelu( x ):
    '''simple docstring'''
    x = tf.convert_to_tensor(x )
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
    return x * cdf
def _gelu_new( x ):
    '''simple docstring'''
    x = tf.convert_to_tensor(x )
    pi = tf.cast(math.pi , x.dtype )
    coeff = tf.cast(0.044715 , x.dtype )
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(x , 3 )) ))
    return x * cdf
def mish( x ):
    '''simple docstring'''
    x = tf.convert_to_tensor(x )
    return x * tf.tanh(tf.math.softplus(x ) )
def gelu_fast( x ):
    '''simple docstring'''
    x = tf.convert_to_tensor(x )
    coeff1 = tf.cast(0.044715 , x.dtype )
    coeff2 = tf.cast(0.7978845608 , x.dtype )
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x) ))
def quick_gelu( x ):
    '''simple docstring'''
    x = tf.convert_to_tensor(x )
    coeff = tf.cast(1.702 , x.dtype )
    return x * tf.math.sigmoid(coeff * x )
def gelu_10( x ):
    '''simple docstring'''
    return tf.clip_by_value(_gelu(x ) , -10 , 10 )
def glu( x , axis=-1 ):
    '''simple docstring'''
    a , b = tf.split(x , 2 , axis=axis )
    return a * tf.math.sigmoid(b )
if version.parse(tf.version.VERSION) >= version.parse("2.4"):
def lowercase ( __magic_name__ ):
'''simple docstring'''
return tf.keras.activations.gelu(__magic_name__ , approximate=__magic_name__ )
a : Tuple = tf.keras.activations.gelu
a : Dict = approximate_gelu_wrap
else:
a : List[str] = _gelu
a : List[Any] = _gelu_new
a : Optional[int] = {
"gelu": gelu,
"gelu_10": gelu_aa,
"gelu_fast": gelu_fast,
"gelu_new": gelu_new,
"glu": glu,
"mish": mish,
"quick_gelu": quick_gelu,
"relu": tf.keras.activations.relu,
"sigmoid": tf.keras.activations.sigmoid,
"silu": tf.keras.activations.swish,
"swish": tf.keras.activations.swish,
"tanh": tf.keras.activations.tanh,
}
def lowercase ( __magic_name__ ):
'''simple docstring'''
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(F"function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}" )
| 679 | 0 |
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester :
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = 1_3
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 9_9
        self.hidden_size = 3_2
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 3_7
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 5_1_2
        self.type_vocab_size = 1_6
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=True , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = TFRoFormerModel(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_lm_head( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs )["logits"]
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = TFRoFormerForMaskedLM(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = TFRoFormerForQuestionAnswering(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": TFRoFormerModel,
"""fill-mask""": TFRoFormerForMaskedLM,
"""question-answering""": TFRoFormerForQuestionAnswering,
"""text-classification""": TFRoFormerForSequenceClassification,
"""text-generation""": TFRoFormerForCausalLM,
"""token-classification""": TFRoFormerForTokenClassification,
"""zero-shot""": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        """simple docstring"""
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True
        return False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = TFRoFormerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RoFormerConfig , hidden_size=3_7 )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_causal_lm( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs )
    def test_for_multiple_choice( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_question_answering( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base" )
        self.assertIsNotNone(model )
@require_tf
class TFRoFormerModelIntegrationTest( unittest.TestCase ):
    @slow
    def test_inference_masked_lm( self ):
        """simple docstring"""
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        # TODO Replace vocab size
        vocab_size = 5_0_0_0_0
        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape , expected_shape )
        print(output[:, :3, :3] )
        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
[
[
[-0.12_05_33_41, -1.0_26_49_01, 0.29_22_19_46],
[-1.5_13_37_83, 0.19_74_33, 0.15_19_06_07],
[-5.0_13_54_03, -3.90_02_56, -0.84_03_87_64],
]
] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-4 )
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest( unittest.TestCase ):
    tolerance = 1E-4
    def test_basic( self ):
        """simple docstring"""
        input_ids = tf.constant([[4, 1_0]] )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
        emb1 = emba(input_ids.shape )
        desired_weights = tf.constant(
            [[0.00_00, 0.00_00, 0.00_00, 1.00_00, 1.00_00, 1.00_00], [0.84_15, 0.04_64, 0.00_22, 0.54_03, 0.99_89, 1.00_00]] )
        tf.debugging.assert_near(emb1 , desired_weights , atol=self.tolerance )
    def test_positional_emb_weights_against_roformer( self ):
        """simple docstring"""
        desired_weights = tf.constant(
            [
                [0.00_00, 0.00_00, 0.00_00, 0.00_00, 0.00_00],
                [0.84_15, 0.82_19, 0.80_20, 0.78_19, 0.76_17],
                [0.90_93, 0.93_64, 0.95_81, 0.97_49, 0.98_70],
            ] )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_1_2 , embedding_dim=5_1_2 )
        emba([2, 1_6, 5_1_2] )
        weights = emba.weight[:3, :5]
        tf.debugging.assert_near(weights , desired_weights , atol=self.tolerance )
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest( unittest.TestCase ):
    tolerance = 1E-4
    def test_apply_rotary_position_embeddings( self ):
        """simple docstring"""
        query_layer = tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.float32 ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0
        key_layer = -tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.float32 ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0
        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=3_2 , embedding_dim=6_4 )
        sinusoidal_pos = embed_positions([2, 1_6, 7_6_8] )[None, None, :, :]
        query_layer , key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos , query_layer , key_layer )
        desired_query_layer = tf.constant(
[
[0.00_00, 0.01_00, 0.02_00, 0.03_00, 0.04_00, 0.05_00, 0.06_00, 0.07_00],
[-0.20_12, 0.88_97, 0.02_63, 0.94_01, 0.20_74, 0.94_63, 0.34_81, 0.93_43],
[-1.70_57, 0.62_71, -1.21_45, 1.38_97, -0.63_03, 1.76_47, -0.11_73, 1.89_85],
[-2.17_31, -1.63_97, -2.73_58, 0.28_54, -2.18_40, 1.71_83, -1.30_18, 2.48_71],
[0.27_17, -3.61_73, -2.92_06, -2.19_88, -3.66_38, 0.38_58, -2.91_55, 2.29_80],
[3.98_59, -2.15_80, -0.79_84, -4.49_04, -4.11_81, -2.02_52, -4.47_82, 1.12_53],
] )
        desired_key_layer = tf.constant(
[
[0.00_00, -0.01_00, -0.02_00, -0.03_00, -0.04_00, -0.05_00, -0.06_00, -0.07_00],
[0.20_12, -0.88_97, -0.02_63, -0.94_01, -0.20_74, -0.94_63, -0.34_81, -0.93_43],
[1.70_57, -0.62_71, 1.21_45, -1.38_97, 0.63_03, -1.76_47, 0.11_73, -1.89_85],
[2.17_31, 1.63_97, 2.73_58, -0.28_54, 2.18_40, -1.71_83, 1.30_18, -2.48_71],
[-0.27_17, 3.61_73, 2.92_06, 2.19_88, 3.66_38, -0.38_58, 2.91_55, -2.29_80],
[-3.98_59, 2.15_80, 0.79_84, 4.49_04, 4.11_81, 2.02_52, 4.47_82, -1.12_53],
] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8] , desired_query_layer , atol=self.tolerance )
        tf.debugging.assert_near(key_layer[0, 0, :6, :8] , desired_key_layer , atol=self.tolerance )
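# A condensed sketch of the rotary-embedding flow exercised above; shapes mirror the test
# (batch=2, heads=12, seq=16, head_dim=64). Illustrative only, not part of the test suite.
def _rotary_demo():
    q = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
    k = -q
    pos = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)([2, 16, 768])[None, None, :, :]
    return TFRoFormerSelfAttention.apply_rotary_position_embeddings(pos, q, k)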
| 552 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/nllb-moe-54B""": """https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json""",
}
class NllbMoeConfig( PretrainedConfig ):
    model_type = """nllb-moe"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
    def __init__( self , vocab_size=1_2_8_1_1_2 , max_position_embeddings=1_0_2_4 , encoder_layers=1_2 , encoder_ffn_dim=4_0_9_6 , encoder_attention_heads=1_6 , decoder_layers=1_2 , decoder_ffn_dim=4_0_9_6 , decoder_attention_heads=1_6 , encoder_layerdrop=0.05 , decoder_layerdrop=0.05 , use_cache=True , is_encoder_decoder=True , activation_function="relu" , d_model=1_0_2_4 , dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , router_bias=False , router_dtype="float32" , router_ignore_padding_tokens=False , num_experts=1_2_8 , expert_capacity=6_4 , encoder_sparse_step=4 , decoder_sparse_step=4 , router_z_loss_coef=0.0_01 , router_aux_loss_coef=0.0_01 , second_expert_policy="all" , normalize_router_prob_before_dropping=False , batch_prioritized_routing=False , moe_eval_capacity_token_fraction=1.0 , moe_token_dropout=0.2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , output_router_logits=False , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" )
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , **kwargs , )
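# A minimal instantiation sketch; the tiny override values below are illustrative only.
if __name__ == "__main__":
    tiny_config = NllbMoeConfig(num_experts=8, expert_capacity=16, encoder_layers=2, decoder_layers=2)
    print(tiny_config.num_experts, tiny_config.router_dtype)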
| 552 | 1 |
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester :
    def __init__( self , parent , batch_size=2 , image_size=32 , patch_size=16 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=4 , backbone_out_indices=[0, 1, 2, 3] , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , initializer_range=0.02 , num_labels=3 , backbone_featmap_shape=[1, 384, 24, 24] , is_hybrid=True , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        """simple docstring"""
        backbone_config = {
            '''global_padding''': '''same''',
            '''layer_type''': '''bottleneck''',
            '''depths''': [3, 4, 9],
            '''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
            '''embedding_dynamic_padding''': True,
            '''hidden_sizes''': [96, 192, 384, 768],
            '''num_groups''': 2,
        }
        return DPTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=backbone_config , backbone_featmap_shape=self.backbone_featmap_shape , )
    def create_and_check_model( self , config , pixel_values , labels ):
        """simple docstring"""
        model = DPTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_depth_estimation( self , config , pixel_values , labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
    def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
{
'depth-estimation': DPTForDepthEstimation,
'feature-extraction': DPTModel,
'image-segmentation': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = DPTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DPTConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''DPT does not use inputs_embeds''' )
    def test_inputs_embeds( self ):
"""simple docstring"""
pass
    def test_model_common_attributes( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_depth_estimation( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs )
    def test_for_semantic_segmentation( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
    def test_training( self ):
        """simple docstring"""
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
loss.backward()
    def test_training_gradient_checkpointing( self ):
        """simple docstring"""
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING ) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config )
            model.to(torch_device )
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
loss.backward()
    def test_initialization( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"""{name}.{key}""" for key in module.state_dict().keys()]
                    break
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _UpperCAmelCase ( self : Dict ):
"""simple docstring"""
pass
    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_raise_readout_type( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = '''add'''
        with self.assertRaises(ValueError ):
            model = DPTForDepthEstimation(config )
def prepare_img ( ):
    '''simple docstring'''
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest( unittest.TestCase ):
    def test_inference_depth_estimation( self ):
        """simple docstring"""
        image_processor = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''' )
        model = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''' ).to(torch_device )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        predicted_depth = outputs.predicted_depth
        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384) )
        self.assertEqual(predicted_depth.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[5.64_37, 5.61_46, 5.65_11], [5.43_71, 5.56_49, 5.59_58], [5.52_15, 5.51_84, 5.52_93]]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , expected_slice , atol=1E-4 ) ) | 256 |
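# Usage sketch mirroring the integration test above (same public checkpoint; illustrative only):
def _depth_demo():
    processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
    model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas")
    inputs = processor(images=prepare_img(), return_tensors="pt")
    with torch.no_grad():
        return model(**inputs).predicted_depth  # shape (1, 384, 384)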
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_model_names = [
"small",
"small-base",
"medium",
"medium-base",
"intermediate",
"intermediate-base",
"large",
"large-base",
"xlarge",
"xlarge-base",
]
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
"funnel-transformer/small-base": (
"https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"
),
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
"funnel-transformer/large-base": (
"https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
),
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {F'''funnel-transformer/{name}''': 5_12 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {F'''funnel-transformer/{name}''': {"do_lower_case": True} for name in _model_names}
class FunnelTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , bos_token="<s>" , eos_token="</s>" , clean_text=True , tokenize_chinese_chars=True , strip_accents=None , wordpieces_prefix="##" , **kwargs , ):
        """simple docstring"""
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , bos_token=bos_token , eos_token=eos_token , clean_text=clean_text , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , wordpieces_prefix=wordpieces_prefix , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls ) * [self.cls_token_type_id] + len(token_ids_0 + sep ) * [0]
        return len(cls ) * [self.cls_token_type_id] + len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files ) | 256 | 1 |
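# Usage sketch (checkpoint name taken from the pretrained map above; illustrative only):
if __name__ == "__main__":
    tok = FunnelTokenizerFast.from_pretrained("funnel-transformer/small")
    # Funnel marks the [CLS] position with token type id 2 (cls_token_type_id above).
    print(tok("Hello world", "How are you?").token_type_ids)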
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class Speech2TextConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = """speech_to_text"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
    def __init__( self , vocab_size=1_0_0_0_0 , encoder_layers=1_2 , encoder_ffn_dim=2_0_4_8 , encoder_attention_heads=4 , decoder_layers=6 , decoder_ffn_dim=2_0_4_8 , decoder_attention_heads=4 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="relu" , d_model=2_5_6 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_source_positions=6_0_0_0 , max_target_positions=1_0_2_4 , num_conv_layers=2 , conv_kernel_sizes=(5, 5) , conv_channels=1_0_2_4 , input_feat_per_channel=8_0 , input_channels=1 , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes )
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        if len(self.conv_kernel_sizes ) != self.num_conv_layers:
            raise ValueError(
                'Configuration for convolutional module is incorrect. '
                'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '
                f"""but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, """
                f"""`config.num_conv_layers = {self.num_conv_layers}`.""" )
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , **kwargs , )
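# Instantiation sketch: the kernel-size consistency check above fires when the two
# settings disagree; the values below are illustrative.
if __name__ == "__main__":
    cfg = Speech2TextConfig(num_conv_layers=2, conv_kernel_sizes=(5, 5))
    print(cfg.conv_kernel_sizes)  # [5, 5]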
| 421 |
def longest_common_substring ( text1: str, text2: str ) -> str:
    if not (isinstance(text1, str ) and isinstance(text2, str )):
        raise ValueError('longest_common_substring() takes two strings for inputs' )
    text1_length = len(text1 )
    text2_length = len(text2 )
    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1 )]
    ans_index = 0
    ans_length = 0
    for i in range(1, text1_length + 1 ):
        for j in range(1, text2_length + 1 ):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]
    return text1[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod()
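    # Illustrative check: the longest common substring of "abcdxyz" and "xyzabcd" is "abcd".
    assert longest_common_substring("abcdxyz", "xyzabcd") == "abcd"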
| 421 | 1 |
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
    )
    parser.add_argument(
        '''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
    )
    parser.add_argument(
        '''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
    )
    parser.add_argument('''--vocab_size''', default=30_522, type=int)
    args = parser.parse_args()
    logger.info(F'''Loading data from {args.data_file}''')
    with open(args.data_file, '''rb''') as fp:
        data = pickle.load(fp)
    logger.info('''Counting occurrences for MLM.''')
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v
    logger.info(F'''Dump to {args.token_counts_dump}''')
    with open(args.token_counts_dump, '''wb''') as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
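    # Example invocation (script name and paths are illustrative):
    #   python token_counts.py --data_file data/dump.bert-base-uncased.pickle \
    #       --token_counts_dump data/token_counts.bert-base-uncased.pickle --vocab_size 30522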
| 454 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess ( image ):
    w , h = image.size
    w , h = (x - x % 3_2 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] )
    image = np.array(image ).astype(np.float32 ) / 2_5_5.0
    image = image[None].transpose(0 , 3 , 1 , 2 )
    image = torch.from_numpy(image )
    return 2.0 * image - 1.0
class LDMSuperResolutionPipeline (DiffusionPipeline ):
    """simple docstring"""
    def __init__( self , vqvae: VQModel , unet: UNetaDModel , scheduler: Union[
        DDIMScheduler,
        PNDMScheduler,
        LMSDiscreteScheduler,
        EulerDiscreteScheduler,
        EulerAncestralDiscreteScheduler,
        DPMSolverMultistepScheduler,
    ] , ) -> None:
        super().__init__()
        self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__( self , image: Union[torch.Tensor, PIL.Image.Image] = None , batch_size: Optional[int] = 1 , num_inference_steps: Optional[int] = 100 , eta: Optional[float] = 0.0 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , ) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image , PIL.Image.Image ):
            batch_size = 1
        elif isinstance(image , torch.Tensor ):
            batch_size = image.shape[0]
        else:
            raise ValueError(F'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image )}' )
        if isinstance(image , PIL.Image.Image ):
            image = preprocess(image )
        height , width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters() ).dtype
        latents = randn_tensor(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
        image = image.to(device=self.device , dtype=latents_dtype )
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(timesteps_tensor ):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image] , dim=1 )
            latents_input = self.scheduler.scale_model_input(latents_input , t )
            # predict the noise residual
            noise_pred = self.unet(latents_input , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_kwargs ).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents ).sample
        image = torch.clamp(image , -1.0 , 1.0 )
        image = image / 2 + 0.5
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
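# Usage sketch, assuming the standard diffusers API; the checkpoint is a public LDM
# super-resolution model and `low_res_image` is an illustrative PIL input:
#   pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#   upscaled = pipe(low_res_image, num_inference_steps=100, eta=1.0).images[0]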
| 81 | 0 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys ( state_dict ):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith('''module.encoder''' ):
            key = key.replace('''module.encoder''' , '''glpn.encoder''' )
        if key.startswith('''module.decoder''' ):
            key = key.replace('''module.decoder''' , '''decoder.stages''' )
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find('''patch_embed''' ) + len('''patch_embed''' )]
            key = key.replace(F'patch_embed{idx}' , F'patch_embeddings.{int(idx )-1}' )
        if "norm" in key:
            key = key.replace('''norm''' , '''layer_norm''' )
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find('''glpn.encoder.layer_norm''' ) + len('''glpn.encoder.layer_norm''' )]
            key = key.replace(F'layer_norm{idx}' , F'layer_norm.{int(idx )-1}' )
        if "layer_norm1" in key:
            key = key.replace('''layer_norm1''' , '''layer_norm_1''' )
        if "layer_norm2" in key:
            key = key.replace('''layer_norm2''' , '''layer_norm_2''' )
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find('''block''' ) + len('''block''' )]
            key = key.replace(F'block{idx}' , F'block.{int(idx )-1}' )
        if "attn.q" in key:
            key = key.replace('''attn.q''' , '''attention.self.query''' )
        if "attn.proj" in key:
            key = key.replace('''attn.proj''' , '''attention.output.dense''' )
        if "attn" in key:
            key = key.replace('''attn''' , '''attention.self''' )
        if "fc1" in key:
            key = key.replace('''fc1''' , '''dense1''' )
        if "fc2" in key:
            key = key.replace('''fc2''' , '''dense2''' )
        if "linear_pred" in key:
            key = key.replace('''linear_pred''' , '''classifier''' )
        if "linear_fuse" in key:
            key = key.replace('''linear_fuse.conv''' , '''linear_fuse''' )
            key = key.replace('''linear_fuse.bn''' , '''batch_norm''' )
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find('''linear_c''' ) + len('''linear_c''' )]
            key = key.replace(F'linear_c{idx}' , F'linear_c.{int(idx )-1}' )
        if "bot_conv" in key:
            key = key.replace('''bot_conv''' , '''0.convolution''' )
        if "skip_conv1" in key:
            key = key.replace('''skip_conv1''' , '''1.convolution''' )
        if "skip_conv2" in key:
            key = key.replace('''skip_conv2''' , '''2.convolution''' )
        if "fusion1" in key:
            key = key.replace('''fusion1''' , '''1.fusion''' )
        if "fusion2" in key:
            key = key.replace('''fusion2''' , '''2.fusion''' )
        if "fusion3" in key:
            key = key.replace('''fusion3''' , '''3.fusion''' )
        if "fusion" in key and "conv" in key:
            key = key.replace('''conv''' , '''convolutional_layer''' )
        if key.startswith('''module.last_layer_depth''' ):
            key = key.replace('''module.last_layer_depth''' , '''head.head''' )
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v ( state_dict , config ):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks ):
        for j in range(config.depths[i] ):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(F'glpn.encoder.block.{i}.{j}.attention.self.kv.weight' )
            kv_bias = state_dict.pop(F'glpn.encoder.block.{i}.{j}.attention.self.kv.bias' )
            # next, add keys and values (in that order) to the state dict
            state_dict[F'glpn.encoder.block.{i}.{j}.attention.self.key.weight'] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[F'glpn.encoder.block.{i}.{j}.attention.self.key.bias'] = kv_bias[: config.hidden_sizes[i]]
            state_dict[F'glpn.encoder.block.{i}.{j}.attention.self.value.weight'] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[F'glpn.encoder.block.{i}.{j}.attention.self.value.bias'] = kv_bias[config.hidden_sizes[i] :]
def prepare_img ( ):
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def convert_glpn_checkpoint ( checkpoint_path , pytorch_dump_folder_path , push_to_hub=False , model_name=None ):
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image , return_tensors='''pt''' ).pixel_values
    logger.info('''Converting model...''' )
    # load original state dict
    state_dict = torch.load(checkpoint_path , map_location=torch.device('''cpu''' ) )
    # rename keys
    state_dict = rename_keys(state_dict )
    # key and value matrices need special treatment
    read_in_k_v(state_dict , config )
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # forward pass
    outputs = model(pixel_values )
    predicted_depth = outputs.predicted_depth
    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] )
        else:
            raise ValueError(F'Unknown model name: {model_name}' )
        expected_shape = torch.Size([1, 480, 640] )
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3] , expected_slice , atol=1E-4 )
        print('''Looks ok!''' )
    # finally, push to hub if required
    if push_to_hub:
        logger.info('''Pushing model and image processor to the hub...''' )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=True , )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=True , )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path',
default=None,
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
parser.add_argument(
'--model_name',
default='glpn-kitti',
type=str,
help='Name of the model in case you\'re pushing to the hub.',
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
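    # Example invocation (paths are illustrative):
    #   python convert_glpn_to_pytorch.py --checkpoint_path ./glpn_kitti.pth \
    #       --pytorch_dump_folder_path ./glpn-kitti --model_name glpn-kitti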
| 704 |
'''simple docstring'''
import os
def solution ( ):
    '''simple docstring'''
    with open(os.path.dirname(__file__ ) + '''/grid.txt''' ) as f:
        l = []  # noqa: E741
        for _ in range(20 ):
            l.append([int(x ) for x in f.readline().split()] )
    maximum = 0
    # right
    for i in range(20 ):
        for j in range(17 ):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp
    # down
    for i in range(17 ):
        for j in range(20 ):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp
    # diagonal 1
    for i in range(17 ):
        for j in range(17 ):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp
    # diagonal 2
    for i in range(17 ):
        for j in range(3 , 20 ):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
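    # Tiny sanity check of the 4-in-a-row product logic above (illustrative, not from the puzzle):
    _row = [1, 2, 3, 4]
    assert _row[0] * _row[1] * _row[2] * _row[3] == 24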
| 340 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
def is_prime (number: int ) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def list_truncated_nums (n: int ) -> list[int]:
    """simple docstring"""
    str_num = str(n )
    list_nums = [n]
    for i in range(1 , len(str_num ) ):
        list_nums.append(int(str_num[i:] ) )
        list_nums.append(int(str_num[:-i] ) )
    return list_nums
def validate (n: int ) -> bool:
    """simple docstring"""
    if len(str(n ) ) > 3:
        if not is_prime(int(str(n )[-3:] ) ) or not is_prime(int(str(n )[:3] ) ):
            return False
    return True
def compute_truncated_primes (count: int = 11 ) -> list[int]:
    """simple docstring"""
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes ) != count:
        if validate(num ):
            list_nums = list_truncated_nums(num )
            if all(is_prime(i ) for i in list_nums ):
                list_truncated_primes.append(num )
        num += 2
    return list_truncated_primes
def solution () -> int:
    """simple docstring"""
    return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
print(f"""{sum(compute_truncated_primes(11)) = }""")
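    # Spot check (known example): 3797 stays prime under truncation from both ends —
    # 3797, 797, 97, 7 and 379, 37, 3 are all prime.
    assert validate(3797) and all(is_prime(i) for i in list_truncated_nums(3797))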
| 501 |
'''simple docstring'''
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)
class lowerCAmelCase__ ( _lowerCAmelCase ):
A = "token-classification"
def __init__( self : Tuple , UpperCamelCase_ : Tuple ) -> Dict:
"""simple docstring"""
if type(UpperCamelCase_ ) == dict:
lowerCamelCase_ : Tuple = Namespace(**UpperCamelCase_ )
lowerCamelCase_ : Any = import_module('''tasks''' )
try:
lowerCamelCase_ : Any = getattr(UpperCamelCase_ , hparams.task_type )
lowerCamelCase_ : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F"""Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
lowerCamelCase_ : int = self.token_classification_task.get_labels(hparams.labels )
lowerCamelCase_ : Optional[Any] = CrossEntropyLoss().ignore_index
super().__init__(UpperCamelCase_ , len(self.labels ) , self.mode )
def __UpperCamelCase ( self : Dict , **UpperCamelCase_ : List[str] ) -> Dict:
"""simple docstring"""
return self.model(**UpperCamelCase_ )
def __UpperCamelCase ( self : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any] ) -> Any:
"""simple docstring"""
lowerCamelCase_ : Dict = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
lowerCamelCase_ : str = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don"t use token_type_ids
lowerCamelCase_ : List[str] = self(**UpperCamelCase_ )
lowerCamelCase_ : int = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
    def prepare_data( self : int ) -> Optional[Any]:
        """simple docstring"""
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode )
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                logger.info('''Loading features from cached file %s''' , cached_features_file )
                features = torch.load(cached_features_file )
            else:
                logger.info('''Creating features from dataset file at %s''' , args.data_dir )
                examples = self.token_classification_task.read_examples_from_file(args.data_dir , mode )
                features = self.token_classification_task.convert_examples_to_features(
                    examples , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ['''xlnet'''] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ['''xlnet'''] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=False , pad_on_left=bool(self.config.model_type in ['''xlnet'''] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
                logger.info('''Saving features into cached file %s''' , cached_features_file )
                torch.save(features , cached_features_file )
    def get_dataloader( self : str , mode : int , batch_size : int , shuffle : bool = False ) -> DataLoader:
        """simple docstring"""
        cached_features_file = self._feature_file(mode )
        logger.info('''Loading features from cached file %s''' , cached_features_file )
        features = torch.load(cached_features_file )
        all_input_ids = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
        all_attention_mask = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
        else:
            all_token_type_ids = torch.tensor([0 for f in features] , dtype=torch.long )
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
        return DataLoader(
            TensorDataset(all_input_ids , all_attention_mask , all_token_type_ids , all_label_ids ) , batch_size=batch_size )
    def validation_step( self : Optional[int] , batch : List[Any] , batch_nb : str ) -> Any:
        """Compute validation"""
        inputs = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
        if self.config.model_type != "distilbert":
            inputs['''token_type_ids'''] = (
                batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
            ) # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs )
        tmp_eval_loss , logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs['''labels'''].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end( self : Tuple , outputs : str ) -> Tuple:
        """simple docstring"""
        val_loss_mean = torch.stack([x['''val_loss'''] for x in outputs] ).mean()
        preds = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
        preds = np.argmax(preds , axis=2 )
        out_label_ids = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
        label_map = dict(enumerate(self.labels ) )
        out_label_list = [[] for _ in range(out_label_ids.shape[0] )]
        preds_list = [[] for _ in range(out_label_ids.shape[0] )]
        for i in range(out_label_ids.shape[0] ):
            for j in range(out_label_ids.shape[1] ):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        results = {
            '''val_loss''': val_loss_mean,
            '''accuracy_score''': accuracy_score(out_label_list , preds_list ),
            '''precision''': precision_score(out_label_list , preds_list ),
            '''recall''': recall_score(out_label_list , preds_list ),
            '''f1''': f1_score(out_label_list , preds_list ),
        }
        ret = dict(results.items() )
        ret['''log'''] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end( self : Optional[Any] , outputs : Tuple ) -> List[Any]:
        """simple docstring"""
        ret , preds , targets = self._eval_end(outputs )
        logs = ret['''log''']
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    def test_epoch_end( self : Dict , outputs : Tuple ) -> Dict:
        """simple docstring"""
        ret , predictions , targets = self._eval_end(outputs )
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret['''log''']
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args( parser : Union[str, Any] , root_dir : List[Any] ) -> Optional[int]:
        """simple docstring"""
        BaseTransformer.add_model_specific_args(parser , root_dir )
        parser.add_argument(
            '''--task_type''' , default='''NER''' , type=str , help='''Task type to fine tune in training (e.g. NER, POS, etc)''' )
        parser.add_argument(
            '''--max_seq_length''' , default=128 , type=int , help=(
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            ) , )
        parser.add_argument(
            '''--labels''' , default='''''' , type=str , help='''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.''' , )
        parser.add_argument(
            '''--gpus''' , default=0 , type=int , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
        parser.add_argument(
            '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
        return parser
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, """checkpoint-epoch=*.ckpt"""), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
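# Hedged invocation sketch; the generic flags (--data_dir, --output_dir,
# --do_predict, ...) come from add_generic_args in lightning_base and are
# assumptions here, since that module is not shown:
# python run_ner.py --data_dir ./conll2003 --labels ./labels.txt --gpus 1 \
#     --output_dir ./ner-out --do_predict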
| 501 | 1 |
"""simple docstring"""
from __future__ import annotations
from random import random
class Node:
    def __init__( self : Optional[Any] , value : int | None = None ) -> None:
        """simple docstring"""
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None
def __repr__( self : Any ) -> str:
"""simple docstring"""
from pprint import pformat
if self.left is None and self.right is None:
return f"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{f"""{self.value}: {self.prior:.5}""": (self.left, self.right)} , indent=1 )
def __str__( self : Union[str, Any] ) -> str:
"""simple docstring"""
        value = str(self.value ) + """ """
        left = str(self.left or """""" )
        right = str(self.right or """""" )
        return value + left + right
def split(root: Node | None , value: int ) -> tuple[Node | None, Node | None]:
    if root is None: # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left , root.left = split(root.left , value )
            return left, root
        else:
            root.right , right = split(root.right , value )
            return root, right
def merge(left: Node | None , right: Node | None ) -> Node | None:
    if (not left) or (not right): # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right , right )
        return left
    else:
        right.left = merge(left , right.left )
        return right
def insert(root: Node | None , value: int ) -> Node | None:
    node = Node(value )
    left , right = split(root , value )
    return merge(merge(left , node ) , right )
def erase(root: Node | None , value: int ) -> Node | None:
    left , right = split(root , value - 1 )
    _ , right = split(right , value )
    return merge(left , right )
def inorder(root: Node | None ) -> None:
if not root: # None
return
else:
inorder(root.left )
print(root.value , end=""",""" )
inorder(root.right )
def interact_treap(root: Node | None , args: str ) -> Node | None:
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root , int(arg[1:] ) )
        elif arg[0] == "-":
            root = erase(root , int(arg[1:] ) )
        else:
            print("""Unknown command""" )
    return root
def main() -> None:
    root = None
    print(
        """enter numbers to create a tree, + value to add value into treap, """
        """- value to erase all nodes with value. 'q' to quit. """ )
    args = input()
    while args != "q":
        root = interact_treap(root , args )
        print(root )
        args = input()
    print("""goodbye!""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
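# Minimal non-interactive sketch of the treap API above:
# root = None
# for v in (5, 3, 8, 1):
#     root = insert(root, v)
# inorder(root)  # prints 1,3,5,8, since an in-order walk yields sorted order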
| 507 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args() -> Tuple:
    parser = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch """
"""helper utility that will spawn up """
"""multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" , type=__UpperCAmelCase , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" , type=__UpperCAmelCase , help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) , )
# rest from the training program
parser.add_argument("""training_script_args""" , nargs=__UpperCAmelCase )
return parser.parse_args()
def main() -> str:
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
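# Hedged example invocation (the wrapper's own flags are defined above; the
# training script name and its arguments are placeholders):
# python xla_spawn.py --num_cores 8 train_script.py --learning_rate 3e-5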
| 507 | 1 |
"""simple docstring"""
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model :BertModel , ckpt_dir :str , model_name :str) -> str:
    tensors_to_transpose = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''')
    var_map = (
('''layer.''', '''layer_'''),
('''word_embeddings.weight''', '''word_embeddings'''),
('''position_embeddings.weight''', '''position_embeddings'''),
('''token_type_embeddings.weight''', '''token_type_embeddings'''),
('''.''', '''/'''),
('''LayerNorm/weight''', '''LayerNorm/gamma'''),
('''LayerNorm/bias''', '''LayerNorm/beta'''),
('''weight''', '''kernel'''),
)
    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)
    state_dict = model.state_dict()
    def to_tf_var_name(name :str):
        for patt, repl in iter(var_map):
            name = name.replace(patt , repl)
        return F"""bert/{name}"""
    def create_tf_var(tensor :np.ndarray , name :str , session :tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype , shape=tensor.shape , name=name , initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var
tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor , name=tf_name , session=session)
            tf.keras.backend.set_value(tf_var , torch_tensor)
            tf_weight = session.run(tf_var)
            print(F"""Successfully created {tf_name}: {np.allclose(tf_weight , torch_tensor)}""")
        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session , os.path.join(ckpt_dir , model_name.replace('''-''' , '''_''') + '''.ckpt'''))
def main(raw_args :int=None) -> str:
    parser = argparse.ArgumentParser()
    parser.add_argument('''--model_name''' , type=str , required=True , help='''model name e.g. bert-base-uncased''')
    parser.add_argument(
        '''--cache_dir''' , type=str , default=None , required=False , help='''Directory containing pytorch model''')
    parser.add_argument('''--pytorch_model_path''' , type=str , required=True , help='''/path/to/<pytorch-model-name>.bin''')
    parser.add_argument('''--tf_cache_dir''' , type=str , required=True , help='''Directory in which to save tensorflow model''')
    args = parser.parse_args(raw_args)
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path) , cache_dir=args.cache_dir , )
    convert_pytorch_checkpoint_to_tf(model=model , ckpt_dir=args.tf_cache_dir , model_name=args.model_name)
if __name__ == "__main__":
main() | 52 |
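# Hedged CLI sketch for the converter above (the script filename and paths are
# placeholders; the flags are the ones parsed in main):
# python convert_checkpoint.py --model_name bert-base-uncased \
#     --pytorch_model_path ./pytorch_model.bin --tf_cache_dir ./tf_ckpt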
"""simple docstring"""
def factorial( digit ) -> int:
    """simple docstring"""
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1 ))
def krishnamurthy( number ) -> bool:
    """simple docstring"""
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate , digit = divmod(duplicate , 10 )
        fact_sum += factorial(digit )
    return fact_sum == number
if __name__ == "__main__":
print("Program to check whether a number is a Krisnamurthy Number or not.")
    number = int(input("Enter number: ").strip())
print(
f"""{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number."""
)
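# Worked check: 145 is a Krishnamurthy number because 1! + 4! + 5! = 1 + 24 + 120 = 145,
# so krishnamurthy(145) is True while krishnamurthy(146) is False.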
| 470 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester( unittest.TestCase ):
    def test_flatten_dict( self : Optional[int] ):
'''simple docstring'''
        input_dict = {
"""task_specific_params""": {
"""summarization""": {"""length_penalty""": 1.0, """max_length""": 1_2_8, """min_length""": 1_2, """num_beams""": 4},
"""summarization_cnn""": {"""length_penalty""": 2.0, """max_length""": 1_4_2, """min_length""": 5_6, """num_beams""": 4},
"""summarization_xsum""": {"""length_penalty""": 1.0, """max_length""": 6_2, """min_length""": 1_1, """num_beams""": 6},
}
}
        expected_dict = {
"""task_specific_params.summarization.length_penalty""": 1.0,
"""task_specific_params.summarization.max_length""": 1_2_8,
"""task_specific_params.summarization.min_length""": 1_2,
"""task_specific_params.summarization.num_beams""": 4,
"""task_specific_params.summarization_cnn.length_penalty""": 2.0,
"""task_specific_params.summarization_cnn.max_length""": 1_4_2,
"""task_specific_params.summarization_cnn.min_length""": 5_6,
"""task_specific_params.summarization_cnn.num_beams""": 4,
"""task_specific_params.summarization_xsum.length_penalty""": 1.0,
"""task_specific_params.summarization_xsum.max_length""": 6_2,
"""task_specific_params.summarization_xsum.min_length""": 1_1,
"""task_specific_params.summarization_xsum.num_beams""": 6,
}
        self.assertEqual(flatten_dict(input_dict ) , expected_dict )
    def test_transpose_numpy( self : List[str] ):
        '''simple docstring'''
        x = np.random.randn(3 , 4 )
        self.assertTrue(np.allclose(transpose(x ) , x.transpose() ) )
        x = np.random.randn(3 , 4 , 5 )
        self.assertTrue(np.allclose(transpose(x , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
    @require_torch
    def test_transpose_torch( self : Optional[Any] ):
        '''simple docstring'''
        x = np.random.randn(3 , 4 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(transpose(x ) , transpose(t ).numpy() ) )
        x = np.random.randn(3 , 4 , 5 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(transpose(x , axes=(1, 2, 0) ) , transpose(t , axes=(1, 2, 0) ).numpy() ) )
    @require_tf
    def test_transpose_tf( self : Union[str, Any] ):
        '''simple docstring'''
        x = np.random.randn(3 , 4 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(transpose(x ) , transpose(t ).numpy() ) )
        x = np.random.randn(3 , 4 , 5 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(transpose(x , axes=(1, 2, 0) ) , transpose(t , axes=(1, 2, 0) ).numpy() ) )
    @require_flax
    def test_transpose_flax( self : List[str] ):
        '''simple docstring'''
        x = np.random.randn(3 , 4 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(transpose(x ) , np.asarray(transpose(t ) ) ) )
        x = np.random.randn(3 , 4 , 5 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(transpose(x , axes=(1, 2, 0) ) , np.asarray(transpose(t , axes=(1, 2, 0) ) ) ) )
    def test_reshape_numpy( self : str ):
        '''simple docstring'''
        x = np.random.randn(3 , 4 )
        self.assertTrue(np.allclose(reshape(x , (4, 3) ) , np.reshape(x , (4, 3) ) ) )
        x = np.random.randn(3 , 4 , 5 )
        self.assertTrue(np.allclose(reshape(x , (1_2, 5) ) , np.reshape(x , (1_2, 5) ) ) )
    @require_torch
    def test_reshape_torch( self : int ):
        '''simple docstring'''
        x = np.random.randn(3 , 4 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(reshape(x , (4, 3) ) , reshape(t , (4, 3) ).numpy() ) )
        x = np.random.randn(3 , 4 , 5 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(reshape(x , (1_2, 5) ) , reshape(t , (1_2, 5) ).numpy() ) )
    @require_tf
    def test_reshape_tf( self : Dict ):
        '''simple docstring'''
        x = np.random.randn(3 , 4 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(reshape(x , (4, 3) ) , reshape(t , (4, 3) ).numpy() ) )
        x = np.random.randn(3 , 4 , 5 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(reshape(x , (1_2, 5) ) , reshape(t , (1_2, 5) ).numpy() ) )
    @require_flax
    def test_reshape_flax( self : List[Any] ):
        '''simple docstring'''
        x = np.random.randn(3 , 4 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(reshape(x , (4, 3) ) , np.asarray(reshape(t , (4, 3) ) ) ) )
        x = np.random.randn(3 , 4 , 5 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(reshape(x , (1_2, 5) ) , np.asarray(reshape(t , (1_2, 5) ) ) ) )
    def test_squeeze_numpy( self : List[str] ):
        '''simple docstring'''
        x = np.random.randn(1 , 3 , 4 )
        self.assertTrue(np.allclose(squeeze(x ) , np.squeeze(x ) ) )
        x = np.random.randn(1 , 4 , 1 , 5 )
        self.assertTrue(np.allclose(squeeze(x , axis=2 ) , np.squeeze(x , axis=2 ) ) )
    @require_torch
    def test_squeeze_torch( self : Optional[Any] ):
        '''simple docstring'''
        x = np.random.randn(1 , 3 , 4 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(squeeze(x ) , squeeze(t ).numpy() ) )
        x = np.random.randn(1 , 4 , 1 , 5 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(squeeze(x , axis=2 ) , squeeze(t , axis=2 ).numpy() ) )
    @require_tf
    def test_squeeze_tf( self : Tuple ):
        '''simple docstring'''
        x = np.random.randn(1 , 3 , 4 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(squeeze(x ) , squeeze(t ).numpy() ) )
        x = np.random.randn(1 , 4 , 1 , 5 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(squeeze(x , axis=2 ) , squeeze(t , axis=2 ).numpy() ) )
    @require_flax
    def test_squeeze_flax( self : List[Any] ):
        '''simple docstring'''
        x = np.random.randn(1 , 3 , 4 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(squeeze(x ) , np.asarray(squeeze(t ) ) ) )
        x = np.random.randn(1 , 4 , 1 , 5 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(squeeze(x , axis=2 ) , np.asarray(squeeze(t , axis=2 ) ) ) )
    def test_expand_dims_numpy( self : Union[str, Any] ):
        '''simple docstring'''
        x = np.random.randn(3 , 4 )
        self.assertTrue(np.allclose(expand_dims(x , axis=1 ) , np.expand_dims(x , axis=1 ) ) )
    @require_torch
    def test_expand_dims_torch( self : int ):
        '''simple docstring'''
        x = np.random.randn(3 , 4 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(expand_dims(x , axis=1 ) , expand_dims(t , axis=1 ).numpy() ) )
    @require_tf
    def test_expand_dims_tf( self : Any ):
        '''simple docstring'''
        x = np.random.randn(3 , 4 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(expand_dims(x , axis=1 ) , expand_dims(t , axis=1 ).numpy() ) )
    @require_flax
    def test_expand_dims_flax( self : Union[str, Any] ):
        '''simple docstring'''
        x = np.random.randn(3 , 4 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(expand_dims(x , axis=1 ) , np.asarray(expand_dims(t , axis=1 ) ) ) ) | 566 |
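# A minimal sketch of the flatten_dict behaviour exercised in the first test,
# assuming "." as the key separator (as the expected keys above imply):
def _flatten_sketch(d, parent_key=""):
    items = {}
    for k, v in d.items():
        key = f"{parent_key}.{k}" if parent_key else k
        items.update(_flatten_sketch(v, key) if isinstance(v, dict) else {key: v})
    return items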
'''simple docstring'''
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))''')) | 566 | 1 |
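# Why the line above is a quine: the template contains %r, so `quine % quine`
# substitutes the repr-quoted template into itself and the printed output
# reproduces the source expression exactly (%% renders as a literal %).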
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
    def setUp( self):
        '''simple docstring'''
        self.checkpoint = 'ylacombe/bark-small'
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = 'en_speaker_1'
        self.input_string = 'This is a test string'
        self.speaker_embeddings_dict_path = 'speaker_embeddings_path.json'
        self.speaker_embeddings_directory = 'speaker_embeddings'
    def get_tokenizer( self , **kwargs):
        '''simple docstring'''
        return AutoTokenizer.from_pretrained(self.checkpoint , **kwargs)
    def tearDown( self):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default( self):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab())
@slow
    def test_save_load_pretrained_additional_features( self):
        '''simple docstring'''
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        processor.save_pretrained(
            self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
    def test_speaker_embeddings( self):
        '''simple docstring'''
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            'semantic_prompt': np.ones(seq_len),
            'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len)),
            'fine_prompt': np.ones((nb_codebooks_total, seq_len)),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string , voice_preset=voice_preset)
        processed_voice_preset = inputs['history_prompt']
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(key , np.array([])).tolist())
        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname , 'file.npz')
        np.savez(tmpfilename , **voice_preset)
        inputs = processor(text=self.input_string , voice_preset=tmpfilename)
        processed_voice_preset = inputs['history_prompt']
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(key , np.array([])).tolist())
        # test loading voice preset from the hub
        inputs = processor(text=self.input_string , voice_preset=self.voice_preset)
    def test_tokenizer( self):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string , padding='max_length' , max_length=256 , add_special_tokens=False , return_attention_mask=True , return_token_type_ids=False , )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist()) | 8 |
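# The tests above can be run with pytest, e.g. (the file path is a placeholder):
# pytest test_processor_bark.py -k speaker_embeddings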
"""simple docstring"""
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = '''scheduler_config.json'''
class FlaxKarrasDiffusionSchedulers( Enum ):
    """simple docstring"""
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5
@dataclass
class FlaxSchedulerOutput( BaseOutput ):
    """simple docstring"""
    prev_sample: jnp.ndarray
class FlaxSchedulerMixin:
    """simple docstring"""
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ['''dtype''']
    _compatibles = []
    has_compatibles = True
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path = None , subfolder = None , return_unused_kwargs=False , **kwargs , ) -> Any:
        '''simple docstring'''
        config , kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path , subfolder=subfolder , return_unused_kwargs=True , **kwargs , )
        scheduler , unused_kwargs = cls.from_config(config , return_unused_kwargs=True , **kwargs )
        if hasattr(scheduler , """create_state""" ) and getattr(scheduler , """has_state""" , False ):
            state = scheduler.create_state()
        if return_unused_kwargs:
            return scheduler, state, unused_kwargs
        return scheduler, state
    def save_pretrained( self , save_directory , push_to_hub = False , **kwargs ) -> Optional[Any]:
        '''simple docstring'''
        self.save_config(save_directory=save_directory , push_to_hub=push_to_hub , **kwargs )
    @property
    def compatibles( self ) -> Tuple:
        '''simple docstring'''
        return self._get_compatibles()
    @classmethod
    def _get_compatibles( cls ) -> Dict:
        '''simple docstring'''
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles ) )
        diffusers_library = importlib.import_module(__name__.split(""".""" )[0] )
        compatible_classes = [
            getattr(diffusers_library , c ) for c in compatible_classes_str if hasattr(diffusers_library , c )
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x : jnp.ndarray , shape : Tuple[int] ):
    '''simple docstring'''
    assert len(shape ) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape ) - x.ndim) ) , shape )
def betas_for_alpha_bar(num_diffusion_timesteps : int , max_beta : Any=0.999 , dtype : Optional[int]=jnp.float32 ):
    '''simple docstring'''
    def alpha_bar(time_step : Optional[int] ):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2 ) / alpha_bar(t1 ) , max_beta ) )
    return jnp.array(betas , dtype=dtype )
@flax.struct.dataclass
class CommonSchedulerState:
    """simple docstring"""
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray
    @classmethod
    def create( cls , scheduler ) -> int:
        '''simple docstring'''
        config = scheduler.config
        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
        else:
            raise NotImplementedError(
                f'beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}' )
        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas , axis=0 )
        return cls(
            alphas=alphas , betas=betas , alphas_cumprod=alphas_cumprod , )
def get_sqrt_alpha_prod(state : CommonSchedulerState , original_samples : jnp.ndarray , noise : jnp.ndarray , timesteps : jnp.ndarray ):
    '''simple docstring'''
    alphas_cumprod = state.alphas_cumprod
    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod , original_samples.shape )
    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod , original_samples.shape )
    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def add_noise_common(state : CommonSchedulerState , original_samples : jnp.ndarray , noise : jnp.ndarray , timesteps : jnp.ndarray ):
    '''simple docstring'''
    sqrt_alpha_prod , sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , original_samples , noise , timesteps )
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
def get_velocity_common(state : CommonSchedulerState , sample : jnp.ndarray , noise : jnp.ndarray , timesteps : jnp.ndarray ):
    '''simple docstring'''
    sqrt_alpha_prod , sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , sample , noise , timesteps )
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
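# Numeric sketch of add_noise_common: if alphas_cumprod[t] = 0.25 then
# sqrt_alpha_prod = 0.5 and sqrt_one_minus_alpha_prod = sqrt(0.75) ~= 0.866, so
# noisy = 0.5 * x0 + 0.866 * eps, i.e. the DDPM forward process
# q(x_t | x_0) = N(sqrt(alpha_bar_t) * x_0, (1 - alpha_bar_t) * I).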
| 58 | 0 |
def prefix_function( input_string: str ) -> list:
    prefix_result = [0] * len(input_string )
    for i in range(1 , len(input_string ) ):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result
def longest_prefix( input_str: str ) -> int:
    return max(prefix_function(input_str ) )
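# Worked example: prefix_function("aabaaab") == [0, 1, 0, 1, 2, 2, 3], so
# longest_prefix("aabaaab") == 3: "aab" is both a proper prefix and a suffix.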
if __name__ == "__main__":
import doctest
    doctest.testmod() | 707 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer , line , max_length , padding_side , pad_to_max_length=True , return_tensors="pt" ) -> Dict:
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer , BartTokenizer ) and not line.startswith(" " ) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] , max_length=max_length , padding="max_length" if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )
def trim_batch(input_ids , pad_token_id , attention_mask=None , ) -> Dict:
    """Remove columns that are populated exclusively by pad_token_id"""
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset( Dataset ):
    '''simple docstring'''
    def __init__( self , tokenizer , data_dir , max_source_length , max_target_length , type_path="train" , n_obs=None , src_lang=None , tgt_lang=None , prefix="" , ):
        super().__init__()
        self.src_file = Path(data_dir ).joinpath(type_path + ".source" )
        self.tgt_file = Path(data_dir ).joinpath(type_path + ".target" )
        self.src_lens = self.get_char_lens(self.src_file )
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens ) > 0, F"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
def __len__( self ):
return len(self.src_lens )
    def __getitem__( self , index ):
        index = index + 1 # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file ) , index ).rstrip("\n" )
        tgt_line = linecache.getline(str(self.tgt_file ) , index ).rstrip("\n" )
        assert source_line, F"empty source line for index {index}"
        assert tgt_line, F"empty tgt line for index {index}"
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , T5Tokenizer ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        source_inputs = encode_line(source_tokenizer , source_line , self.max_source_length , "right" )
        target_inputs = encode_line(target_tokenizer , tgt_line , self.max_target_length , "right" )
        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    @staticmethod
    def get_char_lens( data_file ):
        return [len(x ) for x in Path(data_file ).open().readlines()]
    def collate_fn( self , batch ):
        input_ids = torch.stack([x["input_ids"] for x in batch] )
        masks = torch.stack([x["attention_mask"] for x in batch] )
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch] )
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids , tgt_pad_token_id )
        source_ids , source_mask = trim_batch(input_ids , src_pad_token_id , attention_mask=masks )
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)
def flatten_list(summary_ids ) -> List[str]:
    return list(itertools.chain.from_iterable(summary_ids ) )
def save_git_info(folder_path ) -> None:
    repo_infos = get_git_info()
    save_json(repo_infos , os.path.join(folder_path , "git_log.json" ) )
def save_json(content , path , indent=4 , **json_dump_kwargs ) -> List[str]:
    with open(path , "w" ) as f:
        json.dump(content , f , indent=indent , **json_dump_kwargs )
def load_json(path ) -> Any:
    with open(path ) as f:
        return json.load(f )
def get_git_info() -> Optional[int]:
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        "repo_id": str(repo ),
        "repo_sha": str(repo.head.object.hexsha ),
        "repo_branch": str(repo.active_branch ),
        "hostname": str(socket.gethostname() ),
    }
    return repo_infos
def lmap(f , x ) -> List:
    return list(map(f , x ) )
def pickle_save(obj , path ) -> Optional[Any]:
    with open(path , "wb" ) as f:
        return pickle.dump(obj , f )
def normalize_answer(s ) -> Optional[Any]:
    def remove_articles(text ):
        return re.sub(r"\b(a|an|the)\b" , " " , text )
    def white_space_fix(text ):
        return " ".join(text.split() )
    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def f1_score(prediction , ground_truth ) -> List[str]:
    prediction_tokens = normalize_answer(prediction ).split()
    ground_truth_tokens = normalize_answer(ground_truth ).split()
    common = Counter(prediction_tokens ) & Counter(ground_truth_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens )
    recall = 1.0 * num_same / len(ground_truth_tokens )
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def exact_match_score(prediction , ground_truth ) -> List[Any]:
    return normalize_answer(prediction ) == normalize_answer(ground_truth )
def calculate_exact_match(output_lns , reference_lns ) -> Dict:
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo , pred in zip(output_lns , reference_lns ):
        em += exact_match_score(hypo , pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
def is_rag_model(model_prefix ) -> Dict:
    return model_prefix.startswith("rag" )
def set_extra_model_params(extra_params , hparams , config ) -> Optional[Any]:
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams , p , None ):
            if not hasattr(config , p ) and not hasattr(config , equivalent_param[p] ):
                logger.info("config doesn't have a `{}` attribute".format(p ) )
                delattr(hparams , p )
                continue
            set_p = p if hasattr(config , p ) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p ) )
            delattr(hparams , p )
    return hparams, config
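# Minimal sketch of trim_batch above: columns made entirely of pad_token_id are
# dropped. With pad_token_id = 0:
# [[5, 6, 0, 0],        [[5, 6],
#  [7, 0, 0, 0]]  ->     [7, 0]]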
| 387 | 0 |
from typing import Any
class Node:
    def __init__(self : List[Any] , data : Any ) -> None:
        self.data = data
        self.next = None
class LinkedList:
    def __init__(self : Tuple ) -> None:
        self.head = None
    def print_list(self : Union[str, Any] ) -> None:
        temp = self.head
        while temp is not None:
            print(temp.data , end=" " )
            temp = temp.next
        print()
    def push(self : int , new_data : Any ) -> None:
        new_node = Node(new_data )
        new_node.next = self.head
        self.head = new_node
    def swap_nodes(self : Tuple , node_data_1 , node_data_2 ) -> None:
        if node_data_1 == node_data_2:
            return
        else:
            node_1 = self.head
            while node_1 is not None and node_1.data != node_data_1:
                node_1 = node_1.next
            node_2 = self.head
            while node_2 is not None and node_2.data != node_data_2:
                node_2 = node_2.next
            if node_1 is None or node_2 is None:
                return
            node_1.data , node_2.data = node_2.data , node_1.data
if __name__ == "__main__":
    ll = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print("""After swapping""")
ll.print_list()
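# Trace of the demo: push(5..1) builds 1 -> 2 -> 3 -> 4 -> 5; swap_nodes(1, 4)
# swaps node data rather than links, yielding 4 -> 2 -> 3 -> 1 -> 5.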
| 461 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object ):
    def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> List[Any]:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self ) -> Dict:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self ) -> Any:
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
    def create_and_check_distilbert_model(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Optional[int]:
        model = DistilBertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_distilbert_for_masked_lm(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Dict:
        model = DistilBertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_distilbert_for_question_answering(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Any:
        model = DistilBertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , start_positions=sequence_labels , end_positions=sequence_labels )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_distilbert_for_sequence_classification(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Dict:
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_distilbert_for_token_classification(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> str:
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_distilbert_for_multiple_choice(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Optional[Any]:
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self ) -> List[str]:
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
    pipeline_model_mapping = (
{
"feature-extraction": DistilBertModel,
"fill-mask": DistilBertForMaskedLM,
"question-answering": DistilBertForQuestionAnswering,
"text-classification": DistilBertForSequenceClassification,
"token-classification": DistilBertForTokenClassification,
"zero-shot": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True
    def setUp(self ) -> List[Any]:
        self.model_tester = DistilBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DistilBertConfig , dim=37 )
    def test_config(self ) -> List[str]:
self.config_tester.run_common_tests()
    def test_distilbert_model(self ) -> Optional[int]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs )
    def test_for_masked_lm(self ) -> int:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs )
    def test_for_question_answering(self ) -> List[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification(self ) -> Tuple:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification(self ) -> List[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs )
    def test_for_multiple_choice(self ) -> Optional[int]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs )
@slow
    def test_model_from_pretrained(self ) -> Dict:
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@slow
@require_torch_gpu
    def test_torchscript_device_change(self ) -> Optional[int]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config )
            inputs_dict = self._prepare_for_class(inputs_dict , model_class )
            traced_model = torch.jit.trace(
                model , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model , os.path.join(tmp , "traced_model.pt" ) )
                loaded = torch.jit.load(os.path.join(tmp , "traced_model.pt" ) , map_location=torch_device )
                loaded(inputs_dict["input_ids"].to(torch_device ) , inputs_dict["attention_mask"].to(torch_device ) )
@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase ):
    @slow
    def test_inference_no_head_absolute_embedding(self ) -> str:
        model = DistilBertModel.from_pretrained("distilbert-base-uncased" )
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) )
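# The integration test pins a 3x3 slice of the final hidden states for a fixed
# input; atol=1e-4 tolerates minor kernel and hardware nondeterminism.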
| 461 | 1 |
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
a : Optional[Any] = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
a : Optional[Any] = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
a : Tuple = r"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""] , )
    def _compute( self , predictions , references , return_pvalue=False ):
        results = spearmanr(references , predictions )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 721 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
a : Union[str, Any] = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/vocab.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/vocab.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/vocab.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/vocab.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/vocab.json''',
},
'''merges_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/merges.txt''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/merges.txt''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/merges.txt''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/merges.txt''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/tokenizer.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/tokenizer.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''gpt2''': 1024,
'''gpt2-medium''': 1024,
'''gpt2-large''': 1024,
'''gpt2-xl''': 1024,
'''distilgpt2''': 1024,
}
class _UpperCamelCase ( PreTrainedTokenizerFast ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = GPTaTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        self.add_bos_token = kwargs.pop("""add_bos_token""" , False )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("""add_prefix_space""" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("""type""" ) )
            pre_tok_state["""add_prefix_space"""] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus( self , *args , **kwargs ):
        is_split_into_words = kwargs.get("""is_split_into_words""" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )

    def _encode_plus( self , *args , **kwargs ):
        is_split_into_words = kwargs.get("""is_split_into_words""" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def _build_conversation_input_ids( self , conversation ):
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
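# A minimal usage sketch for the fast tokenizer above (illustrative; "gpt2"
# is the standard Hub checkpoint id for this tokenizer family):
#
#   from transformers import GPT2TokenizerFast
#   tok = GPT2TokenizerFast.from_pretrained("gpt2")
#   ids = tok("hello world")["input_ids"]
#   print(tok.decode(ids))  # "hello world"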
| 422 | 0 |
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
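# A minimal sketch of one of the collators re-exported above (illustrative;
# assumes a tokenizer checkpoint is available locally or on the Hub):
#
#   from transformers import AutoTokenizer, DataCollatorWithPadding
#   tok = AutoTokenizer.from_pretrained("bert-base-uncased")
#   collator = DataCollatorWithPadding(tokenizer=tok)
#   batch = collator([tok("short"), tok("a slightly longer sentence")])
#   print(batch["input_ids"].shape)  # both rows padded to the longest item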
| 117 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
def create_rename_keys(config , base_model=False ):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict , config , base_model=False ):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ''''''
        else:
            prefix = '''vit.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict ):
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k , None )

def rename_key(dct , old , new ):
    val = dct.pop(old )
    dct[new] = val

def prepare_img():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name , pytorch_dump_folder_path , base_model=True ):
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1_000
        repo_id = '''huggingface/label-files'''
        filename = '''imagenet-1k-id2label.json'''
        idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
        idalabel = {int(k ): v for k, v in idalabel.items()}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load('''facebookresearch/dino:main''' , model_name )
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model=base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    # load HuggingFace model
    if base_model:
        model = ViTModel(config , add_pooling_layer=False ).eval()
    else:
        model = ViTForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img() , return_tensors='''pt''' )
    pixel_values = encoding['''pixel_values''']
    outputs = model(pixel_values )
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values )
        assert torch.allclose(final_hidden_state_cls_token , outputs.last_hidden_state[:, 0, :] , atol=1e-1 )
    else:
        logits = original_model(pixel_values )
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits , outputs.logits , atol=1e-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="dino_vitb16",
type=str,
help="Name of the model trained with DINO you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--base_model",
action="store_true",
help="Whether to only convert the base model (no projection head weights).",
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
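# Illustrative invocation of the conversion script above (the script file
# name and output folder are hypothetical placeholders; assumes network
# access to torch hub and the Hugging Face Hub):
#
#   python convert_dino_to_vit.py --model_name dino_vitb16 \
#       --pytorch_dump_folder_path ./dino_vitb16_converted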
| 117 | 1 |
def binary_and(a : int , b : int ) -> str:
    """simple docstring"""
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive' )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a == '1' and char_b == '1' ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
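# Illustrative examples for binary_and (plain ints in, a "0b"-prefixed
# bit string out):
#
#   binary_and(13, 11)  # '0b1001'  (0b1101 & 0b1011)
#   binary_and(0, 5)    # '0b000'   (zero-padded to the longer operand)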
| 712 |
import math
import tensorflow as tf
from packaging import version
def _gelu(x ):
    """simple docstring"""
    x = tf.convert_to_tensor(x )
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
    return x * cdf

def _gelu_new(x ):
    """simple docstring"""
    x = tf.convert_to_tensor(x )
    pi = tf.cast(math.pi , x.dtype )
    coeff = tf.cast(0.044715 , x.dtype )
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(x , 3 )) ))
    return x * cdf

def mish(x ):
    """simple docstring"""
    x = tf.convert_to_tensor(x )
    return x * tf.tanh(tf.math.softplus(x ) )

def gelu_fast(x ):
    """simple docstring"""
    x = tf.convert_to_tensor(x )
    coeff1 = tf.cast(0.044715 , x.dtype )
    coeff2 = tf.cast(0.7978845608 , x.dtype )
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x) ))

def quick_gelu(x ):
    """simple docstring"""
    x = tf.convert_to_tensor(x )
    coeff = tf.cast(1.702 , x.dtype )
    return x * tf.math.sigmoid(coeff * x )

def gelu_10(x ):
    """simple docstring"""
    return tf.clip_by_value(_gelu(x ) , -10 , 10 )

def glu(x , axis=-1 ):
    """simple docstring"""
    a , b = tf.split(x , 2 , axis=axis )
    return a * tf.math.sigmoid(b )

if version.parse(tf.version.VERSION) >= version.parse('2.4'):
    def approximate_gelu_wrap(x ):
        """simple docstring"""
        return tf.keras.activations.gelu(x , approximate=True )

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new
ACT2FN = {
    'gelu': gelu,
    'gelu_10': gelu_10,
    'gelu_fast': gelu_fast,
    'gelu_new': gelu_new,
    'glu': glu,
    'mish': mish,
    'quick_gelu': quick_gelu,
    'relu': tf.keras.activations.relu,
    'sigmoid': tf.keras.activations.sigmoid,
    'silu': tf.keras.activations.swish,
    'swish': tf.keras.activations.swish,
    'tanh': tf.keras.activations.tanh,
}

def get_tf_activation(activation_string ) -> str:
    """simple docstring"""
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(F'''function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys() )}''' )
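# Illustrative lookup through the mapping above:
#
#   act = get_tf_activation("gelu_fast")
#   y = act(tf.constant([-1.0, 0.0, 1.0]))  # element-wise activation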
| 380 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_clap': [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapAudioConfig',
'ClapConfig',
'ClapTextConfig',
],
'processing_clap': ['ClapProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapModel',
'ClapPreTrainedModel',
'ClapTextModel',
'ClapTextModelWithProjection',
'ClapAudioModel',
'ClapAudioModelWithProjection',
]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 183 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None ):
    '''simple docstring'''
    if subparsers is not None:
        parser = subparsers.add_parser('''test''' )
    else:
        parser = argparse.ArgumentParser('''Accelerate test command''' )
    parser.add_argument(
        '''--config_file''' , default=None , help=(
            '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
            '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
            '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
            '''with \'huggingface\'.'''
        ) , )
    if subparsers is not None:
        parser.set_defaults(func=test_command )
    return parser

def test_command(args ):
    '''simple docstring'''
    script_name = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['''test_utils''', '''scripts''', '''test_script.py'''] )
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = F"""--config_file={args.config_file} {script_name}"""
    cmd = ['''accelerate-launch'''] + test_args.split()
    result = execute_subprocess_async(cmd , env=os.environ.copy() )
    if result.returncode == 0:
        print('''Test is a success! You are ready for your distributed training!''' )

def main():
    '''simple docstring'''
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args )
if __name__ == "__main__":
main()
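# Illustrative CLI invocation of the subcommand defined above (the config
# path shown is the default cache location and may differ on your machine):
#
#   accelerate test --config_file ~/.cache/huggingface/accelerate/default_config.yaml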
| 183 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
"""EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json""",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class a__ ( PretrainedConfig ):
    model_type = "gpt_neox"
    def __init__(self, vocab_size=50432, hidden_size=6144, num_hidden_layers=44, num_attention_heads=64, intermediate_size=24576, hidden_act="gelu", rotary_pct=0.25, rotary_emb_base=10000, attention_dropout=0.0, hidden_dropout=0.0, classifier_dropout=0.1, max_position_embeddings=2048, initializer_range=0.02, layer_norm_eps=1e-5, use_cache=True, bos_token_id=0, eos_token_id=2, tie_word_embeddings=False, use_parallel_residual=True, rope_scaling=None, **kwargs, ):
        """simple docstring"""
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                '''The hidden size is not divisible by the number of attention heads! Make sure to update them!''' )
    def _rope_scaling_validation(self ):
        """simple docstring"""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
                F'''got {self.rope_scaling}''' )
        rope_scaling_type = self.rope_scaling.get('''type''', None )
        rope_scaling_factor = self.rope_scaling.get('''factor''', None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float ) or rope_scaling_factor <= 1.0:
            raise ValueError(F'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
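# Illustrative construction of the config above with rotary scaling enabled
# (the sizes are arbitrary example values; the class keeps the mangled name
# `a__` used in this file):
#
#   config = a__(hidden_size=512, num_attention_heads=8,
#                rope_scaling={"type": "linear", "factor": 2.0})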
| 355 |
'''simple docstring'''
def factorial(num :int ):
    fact = 1
    for i in range(1 , num + 1 ):
        fact *= i
    return fact

def split_and_add(number :int ):
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits

def solution(num :int = 1_00 ):
    fact = factorial(num )
    result = split_and_add(fact )
    return result
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
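# Worked examples for the helpers above:
#
#   solution(10)   # 10! = 3628800 and 3+6+2+8+8+0+0 = 27
#   solution(100)  # 648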
| 355 | 1 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
[
transforms.Resize((2_5_6, 2_5_6)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def preprocess(image ):
    if isinstance(image , torch.Tensor ):
        return image
    elif isinstance(image , PIL.Image.Image ):
        image = [image]
    image = [trans(img.convert('''RGB''' ) ) for img in image]
    image = torch.stack(image )
    return image
class UpperCAmelCase ( DiffusionPipeline ):
    def __init__( self , unet , scheduler ):
        """simple docstring"""
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config )
        self.register_modules(unet=unet , scheduler=scheduler )
    def check_inputs( self , strength ):
        """simple docstring"""
        if strength < 0 or strength > 1:
            raise ValueError(f"""The value of strength should be in [0.0, 1.0] but is {strength}""" )

    def get_timesteps( self , num_inference_steps , strength , device ):
        """simple docstring"""
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength ) , num_inference_steps )
        t_start = max(num_inference_steps - init_timestep , 0 )
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start

    def prepare_latents( self , image , timestep , batch_size , dtype , device , generator=None ):
        """simple docstring"""
        if not isinstance(image , (torch.Tensor, PIL.Image.Image, list) ):
            raise ValueError(
                f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image )}""" )
        init_latents = image.to(device=device , dtype=dtype )
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                f"""You have passed a list of generators of length {len(generator )}, but requested an effective batch"""
                f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
        shape = init_latents.shape
        noise = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        # get latents
        print('''add noise to latents at timestep''' , timestep )
        init_latents = self.scheduler.add_noise(init_latents , noise , timestep )
        latents = init_latents
        return latents
@torch.no_grad()
    def __call__( self , image : Union[torch.FloatTensor, PIL.Image.Image] = None , strength : float = 0.8 , batch_size : int = 1 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , eta : float = 0.0 , num_inference_steps : int = 5_0 , use_clipped_model_output : Optional[bool] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , ):
        """simple docstring"""
        self.check_inputs(strength )
        # 2. Preprocess image
        image = preprocess(image )
        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps , num_inference_steps = self.get_timesteps(num_inference_steps , strength , self.device )
        latent_timestep = timesteps[:1].repeat(batch_size )
        # 4. Prepare latent variables
        latents = self.prepare_latents(image , latent_timestep , batch_size , self.unet.dtype , self.device , generator )
        image = latents
        # 5. Denoising loop
        for t in self.progress_bar(timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(image , t ).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output , t , image , eta=eta , use_clipped_model_output=use_clipped_model_output , generator=generator , ).prev_sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image, latent_timestep.item())
        return ImagePipelineOutput(images=image )
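# A minimal usage sketch for the pipeline above (illustrative; the checkpoint
# id is a hypothetical placeholder for any unconditional image UNet):
#
#   from diffusers import UNet2DModel, DDIMScheduler
#   unet = UNet2DModel.from_pretrained("path/to/unconditional-unet")
#   pipe = UpperCAmelCase(unet, DDIMScheduler())
#   result = pipe(image=some_pil_image, strength=0.6, num_inference_steps=50)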
| 103 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""

    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"} ) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"} ) - {"image"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    test_attention_slicing = False
    # No `output_type`.
    required_optional_params = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , )
        scheduler = DDIMScheduler(
            beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="gelu" , projection_dim=512 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case( self ):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs ).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
    def test_xformers_attention_forwardGenerator_pass( self ):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False , expected_max_diff=5e-3 )

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
    def test_inference_batch_consistent( self ):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
    def test_inference_batch_single_identical( self ):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
    def test_num_images_per_prompt( self ):
        pass

    def test_progress_bar( self ):
        return super().test_progress_bar()
@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests( unittest.TestCase ):
"""simple docstring"""
    def test_two_step_model( self ):
        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL" , torch_dtype=torch.float16 )
        pipe.enable_model_cpu_offload()
        # 10 frames
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        video = torch.randn((1, 10, 3, 1_024, 576) , generator=generator )
        video = video.to("cuda" )
        prompt = "Spiderman is surfing"
        video_frames = pipe(prompt , video=video , generator=generator , num_inference_steps=3 , output_type="pt" ).frames
        expected_array = np.array([-1.0_458_984, -1.1_279_297, -0.9_663_086, -0.91_503_906, -0.75_097_656] )
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1e-2
| 61 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester :
    def __init__( self , parent , batch_size=3 , image_size=3_2 , num_channels=3 , embeddings_size=1_0 , hidden_sizes=[1_0, 2_0, 3_0, 4_0] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes )

    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config( self ):
        '''simple docstring'''
        return RegNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )

    def create_and_check_model( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = RegNetModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )

    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class RegNetModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = RegNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RegNetConfig , has_text_modality=False )

    def test_config( self ):
        '''simple docstring'''
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties( self ):
        '''simple docstring'''
        return

    @unittest.skip(reason='''RegNet does not use inputs_embeds''' )
    def test_inputs_embeds( self ):
        '''simple docstring'''
        pass

    @unittest.skip(reason='''RegNet does not support input and output embeddings''' )
    def test_model_common_attributes( self ):
        '''simple docstring'''
        pass

    def test_forward_signature( self ):
        '''simple docstring'''
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )

    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_initialization( self ):
        '''simple docstring'''
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config )
            for name, module in model.named_modules():
                if isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
                    self.assertTrue(
                        torch.all(module.weight == 1 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
                    self.assertTrue(
                        torch.all(module.bias == 0 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )

    def test_hidden_states_output( self ):
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )

        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['''basic''', '''bottleneck''']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict , config , model_class )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict , config , model_class )

    def test_for_image_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_torch
@require_vision
class RegNetModelIntegrationTest( unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
        '''simple docstring'''
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head( self ):
        '''simple docstring'''
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.41_80, -1.50_51, -3.48_36] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 475 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = '''
Human: <<task>>
Assistant: '''
DEFAULT_PROMPTS_REPO = '''huggingface-tools/default-prompts'''
PROMPT_FILES = {'''chat''': '''chat_prompt_template.txt''', '''run''': '''run_prompt_template.txt'''}
def download_prompt(prompt_or_repo_id , agent_name , mode="run"):
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO
    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search('''\\s''' , prompt_or_repo_id) is not None:
        return prompt_or_repo_id
    prompt_file = cached_file(
        prompt_or_repo_id , PROMPT_FILES[mode] , repo_type='''dataset''' , user_agent={'''agent''': agent_name})
    with open(prompt_file , '''r''' , encoding='''utf-8''') as f:
        return f.read()
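# Illustrative call (downloads the default "run" prompt template from the
# Hub; the agent name is just a user-agent label):
#
#   template = download_prompt(None, agent_name="my-agent", mode="run")
#   print(template[:200])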
| 475 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCamelCase_ (DiffusionPipeline ):
    def __init__( self , transformer: TransformeraDModel , vae: AutoencoderKL , scheduler: KarrasDiffusionSchedulers , idalabel: Optional[Dict[int, str]] = None , ):
        super().__init__()
        self.register_modules(transformer=transformer , vae=vae , scheduler=scheduler )
        # create an imagenet -> id dictionary for easier use
        self.labels = {}
        if idalabel is not None:
            for key, value in idalabel.items():
                for label in value.split("," ):
                    self.labels[label.lstrip().rstrip()] = int(key )
        self.labels = dict(sorted(self.labels.items() ) )

    def get_label_ids( self , label: Union[str, List[str]] ) -> List[int]:
        if not isinstance(label , list ):
            label = list(label )
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.""" )
        return [self.labels[l] for l in label]
@torch.no_grad()
    def __call__( self , class_labels: List[int] , guidance_scale: float = 4.0 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , num_inference_steps: int = 50 , output_type: Optional[str] = "pil" , return_dict: bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels )
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels
        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size) , generator=generator , device=self.device , dtype=self.transformer.dtype , )
        latent_model_input = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
        class_labels = torch.tensor(class_labels , device=self.device ).reshape(-1 )
        class_null = torch.tensor([1_000] * batch_size , device=self.device )
        class_labels_input = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
        # set step values
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input ) // 2]
                latent_model_input = torch.cat([half, half] , dim=0 )
            latent_model_input = self.scheduler.scale_model_input(latent_model_input , t )
            timesteps = t
            if not torch.is_tensor(timesteps ):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps , float ):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps] , dtype=dtype , device=latent_model_input.device )
            elif len(timesteps.shape ) == 0:
                timesteps = timesteps[None].to(latent_model_input.device )
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0] )
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input , timestep=timesteps , class_labels=class_labels_input ).sample
            # perform guidance
            if guidance_scale > 1:
                eps , rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps , uncond_eps = torch.split(eps , len(eps ) // 2 , dim=0 )
                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps] , dim=0 )
                noise_pred = torch.cat([eps, rest] , dim=1 )
            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output , _ = torch.split(noise_pred , latent_channels , dim=1 )
            else:
                model_output = noise_pred
            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output , t , latent_model_input ).prev_sample
        if guidance_scale > 1:
            latents , _ = latent_model_input.chunk(2 , dim=0 )
        else:
            latents = latent_model_input
        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents ).sample
        samples = (samples / 2 + 0.5).clamp(0 , 1 )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            samples = self.numpy_to_pil(samples )
        if not return_dict:
            return (samples,)
        return ImagePipelineOutput(images=samples )
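# A minimal usage sketch mirroring the upstream DiTPipeline this class is
# based on (illustrative; "facebook/DiT-XL-2-256" is its public checkpoint):
#
#   from diffusers import DiTPipeline
#   pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
#   ids = pipe.get_label_ids(["white shark"])
#   image = pipe(class_labels=ids, num_inference_steps=25).images[0]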
| 95 |
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)
ORT_TO_NP_TYPE = {
    '''tensor(bool)''': np.bool_,
    '''tensor(int8)''': np.int8,
    '''tensor(uint8)''': np.uint8,
    '''tensor(int16)''': np.int16,
    '''tensor(uint16)''': np.uint16,
    '''tensor(int32)''': np.int32,
    '''tensor(uint32)''': np.uint32,
    '''tensor(int64)''': np.int64,
    '''tensor(uint64)''': np.uint64,
    '''tensor(float16)''': np.float16,
    '''tensor(float)''': np.float32,
    '''tensor(double)''': np.float64,
}
class OnnxRuntimeModel:
    '''simple docstring'''

    def __init__( self , model=None , **kwargs ):
        logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.' )
        self.model = model
        self.model_save_dir = kwargs.get('model_save_dir' , None )
        self.latest_model_name = kwargs.get('latest_model_name' , ONNX_WEIGHTS_NAME )

    def __call__( self , **kwargs ):
        inputs = {k: np.array(v ) for k, v in kwargs.items()}
        return self.model.run(None , inputs )

    @staticmethod
    def load_model( path , provider=None , sess_options=None ):
        if provider is None:
            logger.info('No onnxruntime provider specified, using CPUExecutionProvider' )
            provider = 'CPUExecutionProvider'
        return ort.InferenceSession(path , providers=[provider] , sess_options=sess_options )

    def _save_pretrained( self , save_directory , file_name=None , **kwargs ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name )
        dst_path = Path(save_directory ).joinpath(model_file_name )
        try:
            shutil.copyfile(src_path , dst_path )
        except shutil.SameFileError:
            pass
        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME )
        if src_path.exists():
            dst_path = Path(save_directory ).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME )
            try:
                shutil.copyfile(src_path , dst_path )
            except shutil.SameFileError:
                pass

    def save_pretrained( self , save_directory , **kwargs ):
        if os.path.isfile(save_directory ):
            logger.error(F'''Provided path ({save_directory}) should be a directory, not a file''' )
            return
        os.makedirs(save_directory , exist_ok=True )
        # saving model weights/files
        self._save_pretrained(save_directory , **kwargs )

    @classmethod
    def _from_pretrained( cls , model_id , use_auth_token=None , revision=None , force_download=False , cache_dir=None , file_name=None , provider=None , sess_options=None , **kwargs ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id ):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id , model_file_name ) , provider=provider , sess_options=sess_options )
            kwargs["model_save_dir"] = Path(model_id )
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id , filename=model_file_name , use_auth_token=use_auth_token , revision=revision , cache_dir=cache_dir , force_download=force_download , )
            kwargs["model_save_dir"] = Path(model_cache_path ).parent
            kwargs["latest_model_name"] = Path(model_cache_path ).name
            model = OnnxRuntimeModel.load_model(model_cache_path , provider=provider , sess_options=sess_options )
        return cls(model=model , **kwargs )

    @classmethod
    def from_pretrained( cls , model_id , force_download=True , use_auth_token=None , cache_dir=None , **model_kwargs ):
        revision = None
        if len(str(model_id ).split('@' ) ) == 2:
            model_id , revision = model_id.split('@' )
        return cls._from_pretrained(
            model_id=model_id , revision=revision , cache_dir=cache_dir , force_download=force_download , use_auth_token=use_auth_token , **model_kwargs , )
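# A minimal usage sketch for the wrapper above (illustrative; "model.onnx"
# is a placeholder path to any exported ONNX graph):
#
#   session = OnnxRuntimeModel.load_model("model.onnx")
#   wrapper = OnnxRuntimeModel(session, model_save_dir=Path("."), latest_model_name="model.onnx")
#   outputs = wrapper(input_ids=np.zeros((1, 8), dtype=np.int64))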
| 7 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def read_txt_into_dict(filename ):
    result = {}
    with open(filename , "r" ) as file:
        for line_number, line in enumerate(file ):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively(hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer , attribute )
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key ):
            hf_param_name = PARAM_MAPPING[full_name.split("." )[-1]]
            weight_type = "param"
    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer , weight_type ).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("." ):
            shape_pointer = getattr(shape_pointer , attribute )
        hf_shape = shape_pointer.shape
        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
            F""" {value.shape} for {full_name}""" )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("." ):
            hf_pointer = getattr(hf_pointer , attribute )
        hf_pointer.data = value
    else:
        hf_pointer.data = value
    logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def rename_dict(key , value , full_name , weight_type , hf_dict ):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key ):
            hf_param_name = PARAM_MAPPING[full_name.split("." )[-1]]
            weight_type = "param"
    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type] )
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name] )
    else:
        full_key = key
    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def A ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None ):
_snake_case : int = False
for key, mapped_key in MAPPING.items():
_snake_case : List[str] = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
_snake_case : Union[str, Any] = True
if "*" in mapped_key:
                layer_index = name.split(key )[0].split("." )[-2]
                mapped_key = mapped_key.replace("*" , layer_index )
if "weight_g" in name:
_snake_case : Union[str, Any] = "weight_g"
elif "weight_v" in name:
_snake_case : int = "weight_v"
elif "bias" in name:
_snake_case : str = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_snake_case : Dict = "weight"
else:
_snake_case : str = None
if hf_dict is not None:
rename_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
else:
set_recursively(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
return is_used
return is_used
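# A minimal hedged sketch of the wildcard renaming idea used just above:
# MAPPING values may contain "*" as a layer-index placeholder that is filled
# in from the fairseq parameter name (names here are illustrative only):
def resolve_hf_key(fairseq_name, mapping):
    for key, mapped_key in mapping.items():
        if key in fairseq_name:
            if "*" in mapped_key:
                # the layer index sits just before the matched suffix,
                # e.g. "encoder.layers.3.self_attn.k_proj" -> "3"
                layer_index = fairseq_name.split(key)[0].split(".")[-2]
                return mapped_key.replace("*", layer_index)
            return mapped_key
    return None


assert resolve_hf_key(
    "encoder.layers.3.self_attn.k_proj",
    {"self_attn.k_proj": "encoder.layers.*.attention.k_proj"},
) == "encoder.layers.3.attention.k_proj"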
def A ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
_snake_case : Optional[Any] = []
_snake_case : Optional[Any] = fairseq_model.state_dict()
_snake_case : List[Any] = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
_snake_case : Tuple = False
if "conv_layers" in name:
load_conv_layer(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , hf_model.config.feat_extract_norm == "group" , )
_snake_case : Optional[Any] = True
else:
_snake_case : Dict = load_wavaveca_layer(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
if not is_used:
unused_weights.append(UpperCAmelCase )
logger.warning(F"""Unused weights: {unused_weights}""" )
def A ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
_snake_case : Any = full_name.split("conv_layers." )[-1]
_snake_case : str = name.split("." )
_snake_case : List[Any] = int(items[0] )
_snake_case : str = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
_snake_case : Dict = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
_snake_case : Optional[int] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
_snake_case : Optional[Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
_snake_case : Optional[int] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(UpperCAmelCase )
@torch.no_grad()
def A ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=True , UpperCAmelCase=False ):
if config_path is not None:
_snake_case : Tuple = WavaVecaConfig.from_pretrained(UpperCAmelCase )
else:
_snake_case : str = WavaVecaConfig()
if is_seq_class:
_snake_case : Tuple = read_txt_into_dict(UpperCAmelCase )
_snake_case : Tuple = idalabel
_snake_case : str = WavaVecaForSequenceClassification(UpperCAmelCase )
_snake_case : int = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=UpperCAmelCase , return_attention_mask=UpperCAmelCase , )
feature_extractor.save_pretrained(UpperCAmelCase )
elif is_finetuned:
if dict_path:
_snake_case : Optional[int] = Dictionary.load(UpperCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_snake_case : Tuple = target_dict.pad_index
_snake_case : List[Any] = target_dict.bos_index
_snake_case : Dict = target_dict.eos_index
_snake_case : str = len(target_dict.symbols )
_snake_case : List[str] = os.path.join(UpperCAmelCase , "vocab.json" )
if not os.path.isdir(UpperCAmelCase ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(UpperCAmelCase ) )
return
os.makedirs(UpperCAmelCase , exist_ok=UpperCAmelCase )
_snake_case : Union[str, Any] = target_dict.indices
# fairseq has the <pad> and <s> switched
_snake_case : List[Any] = 0
_snake_case : str = 1
with open(UpperCAmelCase , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(UpperCAmelCase , UpperCAmelCase )
_snake_case : Tuple = WavaVecaCTCTokenizer(
UpperCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=UpperCAmelCase , )
_snake_case : Any = True if config.feat_extract_norm == "layer" else False
_snake_case : int = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=UpperCAmelCase , return_attention_mask=UpperCAmelCase , )
_snake_case : Optional[Any] = WavaVecaProcessor(feature_extractor=UpperCAmelCase , tokenizer=UpperCAmelCase )
processor.save_pretrained(UpperCAmelCase )
_snake_case : List[Any] = WavaVecaForCTC(UpperCAmelCase )
else:
_snake_case : List[str] = WavaVecaForPreTraining(UpperCAmelCase )
if is_finetuned or is_seq_class:
_snake_case : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
_snake_case : List[Any] = argparse.Namespace(task="audio_pretraining" )
_snake_case : List[Any] = fairseq.tasks.setup_task(UpperCAmelCase )
_snake_case : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=UpperCAmelCase )
_snake_case : Optional[int] = model[0].eval()
recursively_load_weights(UpperCAmelCase , UpperCAmelCase , not is_finetuned )
hf_wavavec.save_pretrained(UpperCAmelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
    )
| 716 |
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"""/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch""")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65_536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"""length of state dict: {len(state_dict.keys() )}""")
    print(f"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"""hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin""")
    with open(f"""hub/hopper-medium-v2/unet/hor{hor}/config.json""", "w") as f:
        json.dump(config, f)


def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65_536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }
    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"""length of state dict: {len(state_dict.keys() )}""")
    print(f"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""")
    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
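# The rename above relies on both state dicts enumerating their parameters in
# the same order, so keys can be paired positionally. A hedged, generic sketch
# of that step (toy tensors stand in for real weights):
def rename_by_position(src_state_dict, dst_keys):
    # positional pairing silently misassigns weights if the orderings ever
    # diverge, so shape checks are a sensible follow-up
    mapping = dict(zip(src_state_dict.keys(), dst_keys))
    return {new: src_state_dict[old] for old, new in mapping.items()}


renamed = rename_by_position(
    {"blocks.0.w": torch.zeros(2), "blocks.0.b": torch.ones(2)},
    ["down_blocks.0.weight", "down_blocks.0.bias"],
)
assert list(renamed) == ["down_blocks.0.weight", "down_blocks.0.bias"]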
if __name__ == "__main__":
unet(32)
# unet(128)
    value_function()
| 278 | 0 |
'''simple docstring'''
from PIL import Image


def mean_threshold(image: Image) -> Image:
    """
    image: is a grayscale PIL image object
    """
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image


if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
| 284 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_pix2struct''': [
'''PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Pix2StructConfig''',
'''Pix2StructTextConfig''',
'''Pix2StructVisionConfig''',
],
'''processing_pix2struct''': ['''Pix2StructProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pix2struct"] = [
'''PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Pix2StructPreTrainedModel''',
'''Pix2StructForConditionalGeneration''',
'''Pix2StructVisionModel''',
'''Pix2StructTextModel''',
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 381 | 0 |
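# _LazyModule defers the heavy torch/vision imports above until an attribute
# is first touched. A minimal sketch of the same idea using PEP 562 module
# __getattr__ (module and symbol names here are illustrative):
import importlib

_lazy_structure = {"configuration_pix2struct": ["Pix2StructConfig"]}
_symbol_to_module = {
    symbol: module for module, symbols in _lazy_structure.items() for symbol in symbols
}


def __getattr__(name):
    if name in _symbol_to_module:
        module = importlib.import_module("." + _symbol_to_module[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")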
import copy
import re


class TrialShortNamer:
    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split("_")
        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]
        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]
        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname
        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return
        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }
        field_keys = list(cls.DEFAULTS.keys())
        for k in field_keys:
            cls.add_new_param_name(info, k)
        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]
        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue
            key = cls.NAMING_INFO["short_param"][k]
            if isinstance(v, bool):
                v = 1 if v else 0
            sep = "" if isinstance(v, (int, float)) else "-"
            name.append(f"{key}{sep}{v}")
        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")
        parameters = {}
        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))
            key = cls.NAMING_INFO["reverse_short_param"][p_k]
            parameters[key] = p_v
        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]
        return parameters
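# A hedged usage sketch: a subclass supplies PREFIX and DEFAULTS, after which
# shortname() and parse_repr() round-trip a hyperparameter dict (the exact
# short words depend on collision order; values here are illustrative):
class _RunNamer(TrialShortNamer):
    PREFIX = "run"
    DEFAULTS = {"learning_rate": 3e-5, "batch_size": 8, "warmup": True}


_name = _RunNamer.shortname({"learning_rate": 1e-4, "batch_size": 16, "warmup": True})
assert _RunNamer.parse_repr(_name)["batch_size"] == 16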
| 157 |
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
_lowerCamelCase : List[str] = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
'''wmt19-ru-en''': {'''length_penalty''': 1.1},
'''wmt19-en-ru''': {'''length_penalty''': 1.15},
'''wmt19-en-de''': {'''length_penalty''': 1.0},
'''wmt19-de-en''': {'''length_penalty''': 1.1},
# allenai:
'''wmt16-en-de-dist-12-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-dist-6-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-12-1''': {'''length_penalty''': 0.8},
'''wmt19-de-en-6-6-base''': {'''length_penalty''': 0.6},
'''wmt19-de-en-6-6-big''': {'''length_penalty''': 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"""{k}</w>"""]
        d2[k] = d[k]  # restore
    return d2
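# A quick hedged check of rewrite_dict_keys on a toy vocab (special tokens
# included, since the keep_keys restore step expects them to be present):
_toy = {"le@@": 5, "er": 7, "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
_out = rewrite_dict_keys(_toy)
assert _out["le"] == 5 and _out["er</w>"] == 7 and _out["<s>"] == 0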
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    '''simple docstring'''
assert os.path.exists(SCREAMING_SNAKE_CASE__ )
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
print(f'''Writing results to {pytorch_dump_folder_path}''' )
# handle various types of models
SCREAMING_SNAKE_CASE__ : str = basename(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : str = dirname(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[Any] = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
SCREAMING_SNAKE_CASE__ : List[str] = cls.hub_models()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"bpe": "fastbpe", "tokenizer": "moses"}
SCREAMING_SNAKE_CASE__ : List[str] = "."
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f'''using checkpoint {checkpoint_file}''' )
SCREAMING_SNAKE_CASE__ : Dict = hub_utils.from_pretrained(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , archive_map=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[Any] = vars(chkpt["args"]["model"] )
SCREAMING_SNAKE_CASE__ : Dict = args["source_lang"]
SCREAMING_SNAKE_CASE__ : Optional[int] = args["target_lang"]
SCREAMING_SNAKE_CASE__ : int = dirname(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[str] = basename(SCREAMING_SNAKE_CASE__ )
# dicts
SCREAMING_SNAKE_CASE__ : List[Any] = os.path.join(SCREAMING_SNAKE_CASE__ , f'''dict.{src_lang}.txt''' )
SCREAMING_SNAKE_CASE__ : Dict = os.path.join(SCREAMING_SNAKE_CASE__ , f'''dict.{tgt_lang}.txt''' )
SCREAMING_SNAKE_CASE__ : List[Any] = Dictionary.load(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : str = rewrite_dict_keys(src_dict.indices )
SCREAMING_SNAKE_CASE__ : int = len(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = os.path.join(SCREAMING_SNAKE_CASE__ , "vocab-src.json" )
print(f'''Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records''' )
with open(SCREAMING_SNAKE_CASE__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ , indent=SCREAMING_SNAKE_CASE__ ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
SCREAMING_SNAKE_CASE__ : Optional[int] = True
for k in src_vocab.keys():
if not k.islower():
SCREAMING_SNAKE_CASE__ : Optional[int] = False
break
SCREAMING_SNAKE_CASE__ : Dict = Dictionary.load(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Any = rewrite_dict_keys(tgt_dict.indices )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = len(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE__ , "vocab-tgt.json" )
print(f'''Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records''' )
with open(SCREAMING_SNAKE_CASE__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ , indent=SCREAMING_SNAKE_CASE__ ) )
# merges_file (bpecodes)
SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.join(SCREAMING_SNAKE_CASE__ , VOCAB_FILES_NAMES["merges_file"] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
SCREAMING_SNAKE_CASE__ : Dict = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if os.path.exists(SCREAMING_SNAKE_CASE__ ):
break
with open(SCREAMING_SNAKE_CASE__ , encoding="utf-8" ) as fin:
SCREAMING_SNAKE_CASE__ : Optional[Any] = fin.read()
SCREAMING_SNAKE_CASE__ : Optional[Any] = re.sub(R" \d+$" , "" , SCREAMING_SNAKE_CASE__ , 0 , re.M ) # remove frequency number
print(f'''Generating {merges_file}''' )
with open(SCREAMING_SNAKE_CASE__ , "w" , encoding="utf-8" ) as fout:
fout.write(SCREAMING_SNAKE_CASE__ )
# model config
SCREAMING_SNAKE_CASE__ : Dict = os.path.join(SCREAMING_SNAKE_CASE__ , "config.json" )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f'''need to extend tokenizer to support bpe={args["bpe"]}'''
assert args["tokenizer"] == "moses", f'''need to extend tokenizer to support bpe={args["tokenizer"]}'''
SCREAMING_SNAKE_CASE__ : Optional[int] = {
"architectures": ["FSMTForConditionalGeneration"],
"model_type": "fsmt",
"activation_dropout": args["activation_dropout"],
"activation_function": "relu",
"attention_dropout": args["attention_dropout"],
"d_model": args["decoder_embed_dim"],
"dropout": args["dropout"],
"init_std": 0.0_2,
"max_position_embeddings": args["max_source_positions"],
"num_hidden_layers": args["encoder_layers"],
"src_vocab_size": src_vocab_size,
"tgt_vocab_size": tgt_vocab_size,
"langs": [src_lang, tgt_lang],
"encoder_attention_heads": args["encoder_attention_heads"],
"encoder_ffn_dim": args["encoder_ffn_embed_dim"],
"encoder_layerdrop": args["encoder_layerdrop"],
"encoder_layers": args["encoder_layers"],
"decoder_attention_heads": args["decoder_attention_heads"],
"decoder_ffn_dim": args["decoder_ffn_embed_dim"],
"decoder_layerdrop": args["decoder_layerdrop"],
"decoder_layers": args["decoder_layers"],
"bos_token_id": 0,
"pad_token_id": 1,
"eos_token_id": 2,
"is_encoder_decoder": True,
"scale_embedding": not args["no_scale_embedding"],
"tie_word_embeddings": args["share_all_embeddings"],
}
# good hparam defaults to start with
SCREAMING_SNAKE_CASE__ : Tuple = 5
SCREAMING_SNAKE_CASE__ : Dict = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
SCREAMING_SNAKE_CASE__ : str = best_score_hparams[model_dir]["length_penalty"]
else:
SCREAMING_SNAKE_CASE__ : Tuple = 1.0
print(f'''Generating {fsmt_model_config_file}''' )
with open(SCREAMING_SNAKE_CASE__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ , indent=SCREAMING_SNAKE_CASE__ ) )
# tokenizer config
SCREAMING_SNAKE_CASE__ : Tuple = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[str] = {
"langs": [src_lang, tgt_lang],
"model_max_length": 10_24,
"do_lower_case": do_lower_case,
}
print(f'''Generating {fsmt_tokenizer_config_file}''' )
with open(SCREAMING_SNAKE_CASE__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ , indent=SCREAMING_SNAKE_CASE__ ) )
# model
SCREAMING_SNAKE_CASE__ : List[str] = chkpt["models"][0]
SCREAMING_SNAKE_CASE__ : Dict = model.state_dict()
# rename keys to start with 'model.'
SCREAMING_SNAKE_CASE__ : str = OrderedDict(("model." + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
SCREAMING_SNAKE_CASE__ : List[str] = [
"model.model",
"model.encoder.version",
"model.decoder.version",
"model.encoder_embed_tokens.weight",
"model.decoder_embed_tokens.weight",
"model.encoder.embed_positions._float_tensor",
"model.decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
model_state_dict.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = FSMTConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = FSMTForConditionalGeneration(SCREAMING_SNAKE_CASE__ )
# check that it loads ok
model_new.load_state_dict(SCREAMING_SNAKE_CASE__ , strict=SCREAMING_SNAKE_CASE__ )
# save
SCREAMING_SNAKE_CASE__ : Tuple = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
print(f'''Generating {pytorch_weights_dump_path}''' )
torch.save(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
print("Conversion is done!" )
print("\nLast step is to upload the files to s3" )
print(f'''cd {data_root}''' )
print(f'''transformers-cli upload {model_dir}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fsmt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 157 | 1 |
"""simple docstring"""
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
_UpperCamelCase = (
"""This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate """
"""library. You can have a look at this example script for pointers: """
"""https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"""
)
def SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ ) -> Optional[Any]:
warnings.warn(lowercase__ , lowercase__ )
requires_backends(lowercase__ , "sklearn" )
return (preds == labels).mean()
def SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ ) -> Union[str, Any]:
warnings.warn(lowercase__ , lowercase__ )
requires_backends(lowercase__ , "sklearn" )
lowerCAmelCase__ : Union[str, Any] = simple_accuracy(lowercase__ , lowercase__ )
lowerCAmelCase__ : Tuple = fa_score(y_true=lowercase__ , y_pred=lowercase__ )
return {
"acc": acc,
"f1": fa,
"acc_and_f1": (acc + fa) / 2,
}
def SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ ) -> List[Any]:
warnings.warn(lowercase__ , lowercase__ )
requires_backends(lowercase__ , "sklearn" )
lowerCAmelCase__ : Optional[int] = pearsonr(lowercase__ , lowercase__ )[0]
lowerCAmelCase__ : Dict = spearmanr(lowercase__ , lowercase__ )[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , lowercase__ ) -> Dict:
warnings.warn(lowercase__ , lowercase__ )
requires_backends(lowercase__ , "sklearn" )
assert len(lowercase__ ) == len(lowercase__ ), F"""Predictions and labels have mismatched lengths {len(lowercase__ )} and {len(lowercase__ )}"""
if task_name == "cola":
return {"mcc": matthews_corrcoef(lowercase__ , lowercase__ )}
elif task_name == "sst-2":
return {"acc": simple_accuracy(lowercase__ , lowercase__ )}
elif task_name == "mrpc":
return acc_and_fa(lowercase__ , lowercase__ )
elif task_name == "sts-b":
return pearson_and_spearman(lowercase__ , lowercase__ )
elif task_name == "qqp":
return acc_and_fa(lowercase__ , lowercase__ )
elif task_name == "mnli":
return {"mnli/acc": simple_accuracy(lowercase__ , lowercase__ )}
elif task_name == "mnli-mm":
return {"mnli-mm/acc": simple_accuracy(lowercase__ , lowercase__ )}
elif task_name == "qnli":
return {"acc": simple_accuracy(lowercase__ , lowercase__ )}
elif task_name == "rte":
return {"acc": simple_accuracy(lowercase__ , lowercase__ )}
elif task_name == "wnli":
return {"acc": simple_accuracy(lowercase__ , lowercase__ )}
elif task_name == "hans":
return {"acc": simple_accuracy(lowercase__ , lowercase__ )}
else:
raise KeyError(lowercase__ )
def SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , lowercase__ ) -> List[str]:
warnings.warn(lowercase__ , lowercase__ )
requires_backends(lowercase__ , "sklearn" )
if len(lowercase__ ) != len(lowercase__ ):
raise ValueError(F"""Predictions and labels have mismatched lengths {len(lowercase__ )} and {len(lowercase__ )}""" )
if task_name == "xnli":
return {"acc": simple_accuracy(lowercase__ , lowercase__ )}
else:
raise KeyError(lowercase__ )
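# A hedged usage sketch of the helpers above (requires scikit-learn; the
# arrays and expected outputs are illustrative):
#
#   import numpy as np
#   preds = np.array([1, 0, 1, 1])
#   labels = np.array([1, 0, 0, 1])
#   glue_compute_metrics("mrpc", preds, labels)   # {"acc": 0.75, "f1": 0.8, "acc_and_f1": 0.775}
#   glue_compute_metrics("sst-2", preds, labels)  # {"acc": 0.75}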
| 453 |
"""simple docstring"""
def decimal_isolate(number: float, digit_amount: int) -> float:
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 453 | 1 |
"""simple docstring"""
from __future__ import annotations
def simple_interest(
    principal: float, daily_interest_rate: float, days_between_payments: float
) -> float:
    if days_between_payments <= 0:
        raise ValueError('days_between_payments must be > 0')
    if daily_interest_rate < 0:
        raise ValueError('daily_interest_rate must be >= 0')
    if principal <= 0:
        raise ValueError('principal must be > 0')
    return principal * daily_interest_rate * days_between_payments


def compound_interest(
    principal: float,
    nominal_annual_interest_rate_percentage: float,
    number_of_compounding_periods: float,
) -> float:
    if number_of_compounding_periods <= 0:
        raise ValueError('number_of_compounding_periods must be > 0')
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError('nominal_annual_interest_rate_percentage must be >= 0')
    if principal <= 0:
        raise ValueError('principal must be > 0')
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )


def apr_interest(
    principal: float,
    nominal_annual_percentage_rate: float,
    number_of_years: float,
) -> float:
    if number_of_years <= 0:
        raise ValueError('number_of_years must be > 0')
    if nominal_annual_percentage_rate < 0:
        raise ValueError('nominal_annual_percentage_rate must be >= 0')
    if principal <= 0:
        raise ValueError('principal must be > 0')
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365
    )
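# A hedged worked example of the helpers above (figures are illustrative):
# compound_interest returns the interest earned, not the final balance, so
# 10_000 at 5% per period over 3 periods yields 10_000 * (1.05**3 - 1) ≈ 1_576.25.
assert simple_interest(10_000, 0.0001, 30) == 30.0
assert round(compound_interest(10_000, 0.05, 3), 2) == 1_576.25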
if __name__ == "__main__":
import doctest
doctest.testmod()
| 707 |
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip')
        model = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip')
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset('nielsr/rvlcdip-demo')
        image = dataset['train'][0]['image'].convert('RGB')
        inputs = image_processor(image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347], device=torch_device, dtype=torch.float
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
| 645 | 0 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr
    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr
    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print('Initial List')
    print(*arr)
    arr = odd_even_transposition(arr)
    print('Sorted List\n')
    print(*arr)
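# For reference, a hedged sequential version of the same algorithm: odd-even
# transposition sort without processes, O(n^2) like bubble sort:
def odd_even_transposition_sequential(arr):
    n = len(arr)
    for phase in range(n):
        # even phases compare pairs (0,1), (2,3), ...; odd phases (1,2), (3,4), ...
        start = 0 if phase % 2 == 0 else 1
        for i in range(start, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


assert odd_even_transposition_sequential([5, 3, 4, 1, 2]) == [1, 2, 3, 4, 5]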
if __name__ == "__main__":
main()
| 622 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.linear_k': 'encoder.layers.*.self_attn.linear_k',
'self_attn.linear_v': 'encoder.layers.*.self_attn.linear_v',
'self_attn.linear_q': 'encoder.layers.*.self_attn.linear_q',
'self_attn.pos_bias_u': 'encoder.layers.*.self_attn.pos_bias_u',
'self_attn.pos_bias_v': 'encoder.layers.*.self_attn.pos_bias_v',
'self_attn.linear_out': 'encoder.layers.*.self_attn.linear_out',
'self_attn.linear_pos': 'encoder.layers.*.self_attn.linear_pos',
'self_attn.rotary_emb': 'encoder.embed_positions',
'self_attn_layer_norm': 'encoder.layers.*.self_attn_layer_norm',
'conv_module.pointwise_conv1': 'encoder.layers.*.conv_module.pointwise_conv1',
'conv_module.pointwise_conv2': 'encoder.layers.*.conv_module.pointwise_conv2',
'conv_module.depthwise_conv': 'encoder.layers.*.conv_module.depthwise_conv',
'conv_module.batch_norm': 'encoder.layers.*.conv_module.batch_norm',
'conv_module.layer_norm': 'encoder.layers.*.conv_module.layer_norm',
'ffn1.w_1': 'encoder.layers.*.ffn1.intermediate_dense',
'ffn1.w_2': 'encoder.layers.*.ffn1.output_dense',
'ffn1.layer_norm': 'encoder.layers.*.ffn1_layer_norm',
'ffn2.w_1': 'encoder.layers.*.ffn2.intermediate_dense',
'ffn2.w_2': 'encoder.layers.*.ffn2.output_dense',
'ffn2.layer_norm': 'encoder.layers.*.ffn2_layer_norm',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def lowerCAmelCase_ ( __A, __A, __A, __A, __A ) -> Union[str, Any]:
'''simple docstring'''
for attribute in key.split("." ):
UpperCAmelCase__ = getattr(__A, __A )
if weight_type is not None:
UpperCAmelCase__ = getattr(__A, __A ).shape
else:
UpperCAmelCase__ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
UpperCAmelCase__ = value
elif weight_type == "weight_g":
UpperCAmelCase__ = value
elif weight_type == "weight_v":
UpperCAmelCase__ = value
elif weight_type == "bias":
UpperCAmelCase__ = value
elif weight_type == "running_mean":
UpperCAmelCase__ = value
elif weight_type == "running_var":
UpperCAmelCase__ = value
elif weight_type == "num_batches_tracked":
UpperCAmelCase__ = value
elif weight_type == "inv_freq":
UpperCAmelCase__ = value
else:
UpperCAmelCase__ = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def lowerCAmelCase_ ( __A, __A, __A ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase__ = []
UpperCAmelCase__ = fairseq_model.state_dict()
UpperCAmelCase__ = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
UpperCAmelCase__ = False
if "conv_layers" in name:
load_conv_layer(
__A, __A, __A, __A, hf_model.config.feat_extract_norm == "group", )
UpperCAmelCase__ = True
else:
for key, mapped_key in MAPPING.items():
UpperCAmelCase__ = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
UpperCAmelCase__ = True
if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
if "pos_bias_u" in name:
UpperCAmelCase__ = None
elif "pos_bias_v" in name:
UpperCAmelCase__ = None
elif "weight_g" in name:
UpperCAmelCase__ = "weight_g"
elif "weight_v" in name:
UpperCAmelCase__ = "weight_v"
elif "bias" in name:
UpperCAmelCase__ = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCAmelCase__ = "weight"
elif "running_mean" in name:
UpperCAmelCase__ = "running_mean"
elif "inv_freq" in name:
UpperCAmelCase__ = "inv_freq"
elif "running_var" in name:
UpperCAmelCase__ = "running_var"
elif "num_batches_tracked" in name:
UpperCAmelCase__ = "num_batches_tracked"
else:
UpperCAmelCase__ = None
set_recursively(__A, __A, __A, __A, __A )
continue
if not is_used:
unused_weights.append(__A )
logger.warning(f"""Unused weights: {unused_weights}""" )
def lowerCAmelCase_ ( __A, __A, __A, __A, __A ) -> Any:
'''simple docstring'''
UpperCAmelCase__ = full_name.split("conv_layers." )[-1]
UpperCAmelCase__ = name.split("." )
UpperCAmelCase__ = int(items[0] )
UpperCAmelCase__ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
UpperCAmelCase__ = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
UpperCAmelCase__ = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
UpperCAmelCase__ = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
UpperCAmelCase__ = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__A )
@torch.no_grad()
def convert_wavaveca_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
'''simple docstring'''
if config_path is not None:
UpperCAmelCase__ = WavaVecaConformerConfig.from_pretrained(__A, hidden_act="swish" )
else:
UpperCAmelCase__ = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
UpperCAmelCase__ = "rotary"
if is_finetuned:
if dict_path:
UpperCAmelCase__ = Dictionary.load(__A )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCAmelCase__ = target_dict.pad_index
UpperCAmelCase__ = target_dict.bos_index
UpperCAmelCase__ = target_dict.eos_index
UpperCAmelCase__ = len(target_dict.symbols )
UpperCAmelCase__ = os.path.join(__A, "vocab.json" )
if not os.path.isdir(__A ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(__A ) )
return
os.makedirs(__A, exist_ok=__A )
UpperCAmelCase__ = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCAmelCase__ = 0
UpperCAmelCase__ = 1
with open(__A, "w", encoding="utf-8" ) as vocab_handle:
json.dump(__A, __A )
UpperCAmelCase__ = WavaVecaCTCTokenizer(
__A, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=__A, )
UpperCAmelCase__ = True if config.feat_extract_norm == "layer" else False
UpperCAmelCase__ = WavaVecaFeatureExtractor(
feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=__A, return_attention_mask=__A, )
UpperCAmelCase__ = WavaVecaProcessor(feature_extractor=__A, tokenizer=__A )
processor.save_pretrained(__A )
UpperCAmelCase__ = WavaVecaConformerForCTC(__A )
else:
UpperCAmelCase__ = WavaVecaConformerForPreTraining(__A )
if is_finetuned:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
UpperCAmelCase__ = argparse.Namespace(task="audio_pretraining" )
UpperCAmelCase__ = fairseq.tasks.setup_task(__A )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=__A )
UpperCAmelCase__ = model[0].eval()
recursively_load_weights(__A, __A, not is_finetuned )
hf_wavavec.save_pretrained(__A )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 486 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt"""}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""YituTech/conv-bert-base""": 512,
"""YituTech/conv-bert-medium-small""": 512,
"""YituTech/conv-bert-small""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""YituTech/conv-bert-base""": {"""do_lower_case""": True},
"""YituTech/conv-bert-medium-small""": {"""do_lower_case""": True},
"""YituTech/conv-bert-small""": {"""do_lower_case""": True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
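# A hedged usage sketch (checkpoint id comes from the map above; requires the
# tokenizers backend and a network/cache hit on first use):
#
#   tok = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#   enc = tok("Hello world")
#   # input_ids come back wrapped as [CLS] ... [SEP] by the backend tokenizer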
| 541 |
from collections import deque
from .hash_table import HashTable


class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # each slot holds a deque so that colliding keys chain together
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
| 541 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__A = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 59 |
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''')
def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = '''cuda'''
    elif fp16 and not torch.cuda.is_available():
        raise ValueError('''`float16` model export is only supported on GPUs with CUDA''')
    else:
        device = '''cpu'''
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + '''/vae''')
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / '''vae_decoder''' / '''model.onnx''',
        ordered_input_names=['''latent_sample''', '''return_dict'''],
        output_names=['''sample'''],
        dynamic_axes={
            '''latent_sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
        },
        opset=opset,
    )
    del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_path''',
type=str,
required=True,
help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''',
)
parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--opset''',
default=14,
type=int,
help='''The version of the ONNX operator set to use.''',
)
parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''')
    args = parser.parse_args()

    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print('''SD: Done: ONNX''')
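# Hypothetical invocation (the paths below are placeholders, not taken from
# this script):
#
#   python convert_vae_to_onnx.py \
#       --model_path ./stable-diffusion-checkpoint \
#       --output_path ./sd_onnx \
#       --opset 14
#
# Pass --fp16 on a CUDA machine to export half-precision weights instead.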
| 268 | 0 |
"""simple docstring"""
from math import sqrt
def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of ``n``."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10000) -> int:
    """Return the sum of all amicable numbers below ``limit`` (Project Euler 21)."""
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
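# Quick check with the classic amicable pair (220, 284): each is the sum of
# the other's proper divisors, so both are counted by solution() above.
assert sum_of_divisors(220) == 284
assert sum_of_divisors(284) == 220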
| 91 |
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # Removed: 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
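# A minimal sketch of the rule the tests above encode; this is NOT the
# library's actual implementation. Every `.bin` weight needs a `.safetensors`
# sibling, where transformers components pair `pytorch_model.bin` with
# `model.safetensors`, and the `variant` infix (e.g. ".fp16") may be present
# or absent on either side.
def sketch_is_safetensors_compatible(filenames, variant=None):
    def stem(name):
        # Normalize away the extension, the optional variant infix, and the
        # transformers bin/safetensors naming difference.
        name = name.replace(".safetensors", "").replace(".bin", "")
        if variant is not None:
            name = name.replace(f".{variant}", "")
        return name.replace("pytorch_model", "model")

    bin_stems = {stem(f) for f in filenames if f.endswith(".bin")}
    safetensors_stems = {stem(f) for f in filenames if f.endswith(".safetensors")}
    return bin_stems <= safetensors_stems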
| 91 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
__lowerCamelCase : Any = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def UpperCAmelCase_ ( ):
"""simple docstring"""
lowercase = _ask_options(
"In which compute environment are you running?" , ["This machine", "AWS (Amazon SageMaker)"] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
lowercase = get_sagemaker_input()
else:
lowercase = get_cluster_input()
return config
def UpperCAmelCase_ ( lowerCAmelCase_=None ):
"""simple docstring"""
if subparsers is not None:
lowercase = subparsers.add_parser("config" , description=lowerCAmelCase_ )
else:
lowercase = argparse.ArgumentParser("Accelerate config command" , description=lowerCAmelCase_ )
parser.add_argument(
"--config_file" , default=lowerCAmelCase_ , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , )
if subparsers is not None:
parser.set_defaults(func=lowerCAmelCase_ )
return parser
def UpperCAmelCase_ ( lowerCAmelCase_ ):
"""simple docstring"""
lowercase = get_user_input()
if args.config_file is not None:
lowercase = args.config_file
else:
if not os.path.isdir(lowerCAmelCase_ ):
os.makedirs(lowerCAmelCase_ )
lowercase = default_yaml_config_file
if config_file.endswith(".json" ):
config.to_json_file(lowerCAmelCase_ )
else:
config.to_yaml_file(lowerCAmelCase_ )
print(f'accelerate configuration saved at {config_file}' )
def UpperCAmelCase_ ( ):
"""simple docstring"""
lowercase = config_command_parser()
lowercase = parser.parse_args()
config_command(lowerCAmelCase_ )
if __name__ == "__main__":
main()
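# Typical invocations of this command (the explicit path is a placeholder):
#
#   accelerate config
#   accelerate config --config_file ./my_config.yaml
#
# The first form saves the prompt answers to the default cache location; the
# second saves to the given file, as JSON or YAML depending on the suffix.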
| 310 |
'''simple docstring'''
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TARGET_FEATURE_LENGTH = 256


class SpectrogramDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["melgan"]

    def __init__(
        self,
        notes_encoder: SpectrogramNotesEncoder,
        continuous_encoder: SpectrogramContEncoder,
        decoder: TaFilmDecoder,
        scheduler: DDPMScheduler,
        melgan: OnnxRuntimeModel if is_onnx_available() else Any,
    ) -> None:
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder,
            continuous_encoder=continuous_encoder,
            decoder=decoder,
            scheduler=scheduler,
            melgan=melgan,
        )

    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly scale features to the network's output range."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Invert by linearly scaling network outputs back to the features range."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )
        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def decode(self, encodings_and_masks, input_tokens, noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps
        )
        return logits

    @torch.no_grad()
    def __call__(
        self,
        input_tokens: List[List[int]],
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 100,
        return_dict: bool = True,
        output_type: str = "numpy",
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)

        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype
                )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True
            )

            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=encoder_continuous_inputs,
                continuous_mask=encoder_continuous_mask,
            )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape,
                generator=generator,
                device=self.device,
                dtype=self.decoder.dtype,
            )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps)

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks,
                    input_tokens=x,
                    noise_time=t / self.scheduler.config.num_train_timesteps,
                )

                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample

            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)

            logger.info("Generated segment", i)

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'."
            )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'."
            )

        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=output)
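# A minimal usage sketch; the checkpoint id and MIDI file below are
# illustrative assumptions, not taken from this file:
#
#   from diffusers import MidiProcessor
#   pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
#   processor = MidiProcessor()
#   output = pipe(processor("input.mid"))
#   audio = output.audios[0]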
| 310 | 1 |
'''simple docstring'''
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    """Return True if the digits of ``n`` are a permutation of 1-9."""
    digits = str(n)
    return len(digits) == 9 and set(digits) == set("123456789")


def solution() -> int | None:
    """Return the largest 1-9 pandigital concatenated product (Project Euler 38)."""
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate

    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
if __name__ == "__main__":
print(F"""{solution() = }""")
| 187 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}


class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions


def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)


def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """
    Custom implementation of GPTNeoAttentionMixin._get_block_length_and_num_blocks to enable the export to ONNX, as
    the original implementation uses Python variables and control flow.
    """
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")


class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
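# Quick sanity checks for the two ONNX-friendly helpers above, kept as
# comments so this configuration module does not import torch eagerly:
#
#   x = torch.arange(10).view(1, 10)
#   torch.equal(custom_unfold(x, dimension=1, size=4, step=2), x.unfold(1, 4, 2))  # True
#   custom_get_block_length_and_num_blocks(seq_length=10, window_size=7)  # (tensor(5), tensor(2))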
| 187 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")

        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }

        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 588 |
'''simple docstring'''
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class GradientAccumulatorTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def testGradientAccumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)

    def testGradientAccumulatorDistributionStrategy(self):
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()]
            )
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)

        accumulate([1.0, 2.0], [-1.0, 1.0])
        accumulate([3.0, -1.0], [-1.0, -1.0])
        accumulate([-2.0, 2.0], [3.0, -2.0])
        self.assertEqual(accumulator.step, 3)
        _check_local_values([2.0, 3.0], [1.0, -2.0])

        apply_grad()
        self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2)

        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        _check_local_values([0.0, 0.0], [0.0, 0.0])
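# Standalone sketch of the pattern exercised above: sum gradients over several
# micro-batches, then apply them in one optimizer step (names are illustrative):
#
#   accumulator = GradientAccumulator()
#   for grads in micro_batch_gradients:
#       accumulator(grads)
#   optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#   accumulator.reset()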
| 588 | 1 |
'''simple docstring'''
def is_arithmetic_series(series: list) -> bool:
    """Return True if ``series`` is an arithmetic series."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    """Return the arithmetic mean of ``series``."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
if __name__ == "__main__":
import doctest
doctest.testmod()
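# Example: [2, 4, 6] has a common difference of 2, so it is arithmetic, and
# its mean is (2 + 4 + 6) / 3 = 4.0.
assert is_arithmetic_series([2, 4, 6])
assert arithmetic_mean([2, 4, 6]) == 4.0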
| 702 |
'''simple docstring'''


def sum_of_digits(n: int) -> int:
    """Iterative digit sum."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Recursive digit sum."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Digit sum via string conversion."""
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Benchmark the three digit-sum implementations."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
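# The three implementations agree; e.g. the digits of 262144 (2**18) sum to
# 2 + 6 + 2 + 1 + 4 + 4 = 19.
assert sum_of_digits(262144) == sum_of_digits_recursion(262144) == sum_of_digits_compact(262144) == 19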
| 415 | 0 |
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal_successful_result() -> None:
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    result = kruskal(num_nodes, edges)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    assert sorted(expected) == sorted(result)
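# Kruskal's algorithm sorts the edges by weight and greedily keeps every edge
# that does not close a cycle, so the expected tree above has n - 1 = 8 edges
# and total weight 1 + 2 + 2 + 4 + 4 + 7 + 8 + 9 = 37.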
| 14 |
'''simple docstring'''
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
__snake_case: Optional[Any] = parse_flag_from_env("RUN_SLOW", default=False)
__snake_case: str = parse_flag_from_env("RUN_REMOTE", default=False)
__snake_case: Any = parse_flag_from_env("RUN_LOCAL", default=True)
__snake_case: Dict = parse_flag_from_env("RUN_PACKAGED", default=True)
# Compression
__snake_case: str = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
__snake_case: Optional[int] = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
__snake_case: str = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")
# Audio
__snake_case: Optional[Any] = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)
# Beam
__snake_case: str = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
reason="test requires apache-beam and a compatible dill version",
)
# Dill-cloudpickle compatibility
__snake_case: Union[str, Any] = pytest.mark.skipif(
config.DILL_VERSION <= version.parse("0.3.2"),
reason="test requires dill>0.3.2 for cloudpickle compatibility",
)
# Windows
__snake_case: Optional[Any] = pytest.mark.skipif(
sys.platform == "win32",
reason="test should not be run on Windows",
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."


def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result


def pytest_xdist_worker_id():
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
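# Usage sketch for the offline simulation helper defined above:
#
#   with offline(OfflineSimulationMode.CONNECTION_FAILS):
#       requests.get("https://huggingface.co")  # raises requests.ConnectionError
#
# CONNECTION_TIMES_OUT instead forces a near-instant timeout, and
# HF_DATASETS_OFFLINE_SET_TO_1 only flips the datasets offline config flag.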
| 577 | 0 |
"""simple docstring"""
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
'''EncodecConfig''': ['''overlap'''],
# used as `self.bert_model = BertModel(config, ...)`
'''DPRConfig''': True,
# not used in modeling files, but it's an important information
'''FSMTConfig''': ['''langs'''],
# used internally in the configuration class file
'''GPTNeoConfig''': ['''attention_types'''],
# used internally in the configuration class file
'''EsmConfig''': ['''is_folding_model'''],
# used during training (despite we don't have training script for these models yet)
'''Mask2FormerConfig''': ['''ignore_value'''],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'''OneFormerConfig''': ['''ignore_value''', '''norm'''],
# used during preprocessing and collation, see `collating_graphormer.py`
'''GraphormerConfig''': ['''spatial_pos_max'''],
# used internally in the configuration class file
'''T5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'''MT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
'''UMT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
# used internally in the configuration class file
'''LongT5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
'''SwitchTransformersConfig''': ['''feed_forward_proj'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''BioGptConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''GLPNConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''SegformerConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''CvtConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''PerceiverConfig''': ['''layer_norm_eps'''],
# used internally to calculate the feature size
'''InformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''TimeSeriesTransformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''AutoformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate `mlp_dim`
'''SamVisionConfig''': ['''mlp_ratio'''],
# For (head) training, but so far not implemented
'''ClapAudioConfig''': ['''num_classes'''],
# Not used, but providing useful information to users
'''SpeechT5HifiGanConfig''': ['''sampling_rate'''],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'''CLIPSegConfig''': True,
'''DeformableDetrConfig''': True,
'''DetaConfig''': True,
'''DinatConfig''': True,
'''DonutSwinConfig''': True,
'''EfficientFormerConfig''': True,
'''FSMTConfig''': True,
'''JukeboxConfig''': True,
'''LayoutLMv2Config''': True,
'''MaskFormerSwinConfig''': True,
'''MT5Config''': True,
'''NatConfig''': True,
'''OneFormerConfig''': True,
'''PerceiverConfig''': True,
'''RagConfig''': True,
'''SpeechT5Config''': True,
'''SwinConfig''': True,
'''Swin2SRConfig''': True,
'''Swinv2Config''': True,
'''SwitchTransformersConfig''': True,
'''TableTransformerConfig''': True,
'''TapasConfig''': True,
'''TransfoXLConfig''': True,
'''UniSpeechConfig''': True,
'''UniSpeechSatConfig''': True,
'''WavLMConfig''': True,
'''WhisperConfig''': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'''JukeboxPriorConfig''': True,
# TODO: @Younes (for `is_decoder`)
'''Pix2StructTextConfig''': True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    """Check whether any name in `attributes` is used in one of the modeling source strings."""
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True

        # configuration class specific cases
        if not case_allowed:
            allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
            case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed


def check_config_attributes_being_used(config_class):
    """Return the sorted list of `__init__` parameters that are unused in the modeling files."""
    # Get the parameters in `__init__`
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)


def check_config_attributes():
    """Raise if any configuration class has unused attributes."""
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)
if __name__ == "__main__":
check_config_attributes()
| 491 |
"""simple docstring"""
def permute(nums: list[int]) -> list[list[int]]:
    """Return all permutations of ``nums`` via recursive rotation."""
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute_backtrack(nums: list[int]) -> list[list[int]]:
    """Return all permutations of ``nums`` via in-place backtracking."""

    def backtrack(start: int) -> None:
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output: list[list[int]] = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in the permute_backtrack function
    res = permute_backtrack([1, 2, 3])
    print(res)
    doctest.testmod()
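# Both strategies enumerate the same 3! = 6 orderings (possibly in a different
# sequence), so after sorting the outputs coincide:
#   sorted(permute([1, 2, 3])) == sorted(permute_backtrack([1, 2, 3]))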
| 491 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_lowerCAmelCase = logging.get_logger(__name__)
class UpperCAmelCase__ ( snake_case__ ):
snake_case_ = ['''pixel_values''']
def __init__( self , A__ = True , A__ = None , A__ = PILImageResampling.BICUBIC , A__ = True , A__ = True , A__ = 1 / 255 , A__ = None , A__ = True , A__ = None , A__ = None , **A__ , ):
"""simple docstring"""
super().__init__(**A__ )
UpperCAmelCase_: Dict = size if size is not None else {"height": 224, "width": 224}
UpperCAmelCase_: Optional[Any] = get_size_dict(A__ )
UpperCAmelCase_: str = crop_size if crop_size is not None else {"height": 224, "width": 224}
UpperCAmelCase_: Any = get_size_dict(A__ , default_to_square=A__ , param_name="crop_size" )
UpperCAmelCase_: List[Any] = do_resize
UpperCAmelCase_: Dict = do_rescale
UpperCAmelCase_: Tuple = do_normalize
UpperCAmelCase_: Any = do_center_crop
UpperCAmelCase_: str = crop_size
UpperCAmelCase_: int = size
UpperCAmelCase_: Union[str, Any] = resample
UpperCAmelCase_: str = rescale_factor
UpperCAmelCase_: str = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
UpperCAmelCase_: List[Any] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def snake_case_ ( self , A__ , A__ , A__ = PILImageResampling.BILINEAR , A__ = None , **A__ , ):
"""simple docstring"""
UpperCAmelCase_: Any = get_size_dict(A__ )
if "shortest_edge" in size:
UpperCAmelCase_: Tuple = get_resize_output_image_size(A__ , size=size["shortest_edge"] , default_to_square=A__ )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
UpperCAmelCase_: List[str] = (size["height"], size["width"])
else:
raise ValueError(F"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}" )
return resize(A__ , size=A__ , resample=A__ , data_format=A__ , **A__ )
def snake_case_ ( self , A__ , A__ , A__ = None , **A__ , ):
"""simple docstring"""
UpperCAmelCase_: Optional[int] = get_size_dict(A__ )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(A__ , size=(size["height"], size["width"]) , data_format=A__ , **A__ )
def snake_case_ ( self , A__ , A__ , A__ = None , **A__ ):
"""simple docstring"""
return rescale(A__ , scale=A__ , data_format=A__ , **A__ )
def snake_case_ ( self , A__ , A__ , A__ , A__ = None , **A__ , ):
"""simple docstring"""
return normalize(A__ , mean=A__ , std=A__ , data_format=A__ , **A__ )
def snake_case_ ( self , A__ , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = ChannelDimension.FIRST , **A__ , ):
"""simple docstring"""
UpperCAmelCase_: Dict = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_: List[str] = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_: Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_: int = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_: Union[str, Any] = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_: int = get_size_dict(A__ , param_name="crop_size" , default_to_square=A__ )
UpperCAmelCase_: int = resample if resample is not None else self.resample
UpperCAmelCase_: List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_: int = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_: Optional[int] = image_std if image_std is not None else self.image_std
UpperCAmelCase_: List[Any] = size if size is not None else self.size
UpperCAmelCase_: Optional[int] = get_size_dict(A__ )
if not is_batched(A__ ):
UpperCAmelCase_: List[str] = [images]
if not valid_images(A__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
# All transformations expect numpy arrays.
UpperCAmelCase_: List[Any] = [to_numpy_array(A__ ) for image in images]
if do_resize:
UpperCAmelCase_: Dict = [self.resize(image=A__ , size=A__ , resample=A__ ) for image in images]
if do_center_crop:
UpperCAmelCase_: Optional[Any] = [self.center_crop(image=A__ , size=A__ ) for image in images]
if do_rescale:
UpperCAmelCase_: Optional[int] = [self.rescale(image=A__ , scale=A__ ) for image in images]
if do_normalize:
UpperCAmelCase_: List[str] = [self.normalize(image=A__ , mean=A__ , std=A__ ) for image in images]
UpperCAmelCase_: Optional[Any] = [to_channel_dimension_format(A__ , A__ ) for image in images]
UpperCAmelCase_: List[str] = {"pixel_values": images}
        return BatchFeature(data=A__ , tensor_type=A__ )
| 137
from __future__ import annotations

from fractions import Fraction


def is_digit_cancelling(num: int, den: int) -> bool:
    """Return True if num/den keeps its value after (wrongly) cancelling the shared digit."""
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    """Collect the two-digit digit-cancelling fractions (Project Euler 33)."""
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    """Return the denominator of the product of the fractions, in lowest terms."""
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())
| 137 | 1
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
# TODO: upload to AWS
__UpperCAmelCase = {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'
),
}
class _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
A = 'retribert'
def __init__( self ,__SCREAMING_SNAKE_CASE=30522 ,__SCREAMING_SNAKE_CASE=768 ,__SCREAMING_SNAKE_CASE=8 ,__SCREAMING_SNAKE_CASE=12 ,__SCREAMING_SNAKE_CASE=3072 ,__SCREAMING_SNAKE_CASE="gelu" ,__SCREAMING_SNAKE_CASE=0.1 ,__SCREAMING_SNAKE_CASE=0.1 ,__SCREAMING_SNAKE_CASE=512 ,__SCREAMING_SNAKE_CASE=2 ,__SCREAMING_SNAKE_CASE=0.02 ,__SCREAMING_SNAKE_CASE=1e-12 ,__SCREAMING_SNAKE_CASE=True ,__SCREAMING_SNAKE_CASE=128 ,__SCREAMING_SNAKE_CASE=0 ,**__SCREAMING_SNAKE_CASE ,):
super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Dict = vocab_size
SCREAMING_SNAKE_CASE : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE : int = num_hidden_layers
SCREAMING_SNAKE_CASE : int = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE : Any = type_vocab_size
SCREAMING_SNAKE_CASE : Any = initializer_range
SCREAMING_SNAKE_CASE : Tuple = layer_norm_eps
SCREAMING_SNAKE_CASE : Union[str, Any] = share_encoders
SCREAMING_SNAKE_CASE : str = projection_dim
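# A minimal usage sketch. The class above is obfuscated (its name is `_a` and
# its attribute assignments are mangled), so this uses the upstream name
# RetriBertConfig instead; it assumes a transformers version that still ships
# the RetriBert model (it was later moved under the deprecated models).
if __name__ == "__main__":
    from transformers import RetriBertConfig

    cfg = RetriBertConfig(hidden_size=256, num_hidden_layers=4)
    print(cfg.model_type)      # "retribert"
    print(cfg.projection_dim)  # defaults to 128, as in the signature above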
| 721 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all BART models at https://huggingface.co/models?filter=bart
__UpperCAmelCase = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
'tokenizer_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json',
},
}
__UpperCAmelCase = {
'facebook/bart-base': 1024,
'facebook/bart-large': 1024,
'facebook/bart-large-mnli': 1024,
'facebook/bart-large-cnn': 1024,
'facebook/bart-large-xsum': 1024,
'yjernite/bart_eli5': 1024,
}
class _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = ['input_ids', 'attention_mask']
A = BartTokenizer
def __init__( self ,__SCREAMING_SNAKE_CASE=None ,__SCREAMING_SNAKE_CASE=None ,__SCREAMING_SNAKE_CASE=None ,__SCREAMING_SNAKE_CASE="replace" ,__SCREAMING_SNAKE_CASE="<s>" ,__SCREAMING_SNAKE_CASE="</s>" ,__SCREAMING_SNAKE_CASE="</s>" ,__SCREAMING_SNAKE_CASE="<s>" ,__SCREAMING_SNAKE_CASE="<unk>" ,__SCREAMING_SNAKE_CASE="<pad>" ,__SCREAMING_SNAKE_CASE="<mask>" ,__SCREAMING_SNAKE_CASE=False ,__SCREAMING_SNAKE_CASE=True ,**__SCREAMING_SNAKE_CASE ,):
super().__init__(
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,tokenizer_file=__SCREAMING_SNAKE_CASE ,errors=__SCREAMING_SNAKE_CASE ,bos_token=__SCREAMING_SNAKE_CASE ,eos_token=__SCREAMING_SNAKE_CASE ,sep_token=__SCREAMING_SNAKE_CASE ,cls_token=__SCREAMING_SNAKE_CASE ,unk_token=__SCREAMING_SNAKE_CASE ,pad_token=__SCREAMING_SNAKE_CASE ,mask_token=__SCREAMING_SNAKE_CASE ,add_prefix_space=__SCREAMING_SNAKE_CASE ,trim_offsets=__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE ,)
SCREAMING_SNAKE_CASE : List[str] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' ,__SCREAMING_SNAKE_CASE ) != add_prefix_space:
SCREAMING_SNAKE_CASE : Optional[Any] = getattr(__SCREAMING_SNAKE_CASE ,pre_tok_state.pop('type' ) )
SCREAMING_SNAKE_CASE : int = add_prefix_space
SCREAMING_SNAKE_CASE : Tuple = pre_tok_class(**__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : int = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
SCREAMING_SNAKE_CASE : int = 'post_processor'
SCREAMING_SNAKE_CASE : List[Any] = getattr(self.backend_tokenizer ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
if tokenizer_component_instance:
SCREAMING_SNAKE_CASE : List[str] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
SCREAMING_SNAKE_CASE : str = tuple(state['sep'] )
if "cls" in state:
SCREAMING_SNAKE_CASE : Tuple = tuple(state['cls'] )
SCREAMING_SNAKE_CASE : Tuple = False
if state.get('add_prefix_space' ,__SCREAMING_SNAKE_CASE ) != add_prefix_space:
SCREAMING_SNAKE_CASE : int = add_prefix_space
SCREAMING_SNAKE_CASE : Any = True
if state.get('trim_offsets' ,__SCREAMING_SNAKE_CASE ) != trim_offsets:
SCREAMING_SNAKE_CASE : Optional[int] = trim_offsets
SCREAMING_SNAKE_CASE : List[Any] = True
if changes_to_apply:
SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(__SCREAMING_SNAKE_CASE ,state.pop('type' ) )
SCREAMING_SNAKE_CASE : Optional[int] = component_class(**__SCREAMING_SNAKE_CASE )
setattr(self.backend_tokenizer ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
@property
def __a ( self ):
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def __a ( self ,__SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE : List[str] = AddedToken(__SCREAMING_SNAKE_CASE ,lstrip=__SCREAMING_SNAKE_CASE ,rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ) else value
SCREAMING_SNAKE_CASE : List[Any] = value
def __a ( self ,*__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE : Optional[Any] = kwargs.get('is_split_into_words' ,__SCREAMING_SNAKE_CASE )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
def __a ( self ,*__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE : List[str] = kwargs.get('is_split_into_words' ,__SCREAMING_SNAKE_CASE )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._encode_plus(*__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
def __a ( self ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = None ):
SCREAMING_SNAKE_CASE : Union[str, Any] = self._tokenizer.model.save(__SCREAMING_SNAKE_CASE ,name=__SCREAMING_SNAKE_CASE )
return tuple(__SCREAMING_SNAKE_CASE )
def __a ( self ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE=None ):
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __a ( self ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = None ):
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
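# A brief usage sketch via the upstream class name (the class above is
# obfuscated to `_a`); it fetches the published facebook/bart-base files from
# the URL maps at the top of this file on first use.
if __name__ == "__main__":
    from transformers import BartTokenizerFast

    tok = BartTokenizerFast.from_pretrained("facebook/bart-base")
    enc = tok("Hello world!")
    print(enc.input_ids)  # wrapped in <s> ... </s>, i.e. starts with 0 and ends with 2
    print(tok.convert_ids_to_tokens(enc.input_ids))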
| 220 | 0 |
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
lowerCamelCase__ : Optional[Any] = 2
class _snake_case :
def __init__( self , *, # begin keyword-only arguments
SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="<pad>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="<unk>" , SCREAMING_SNAKE_CASE_=None , ):
'''simple docstring'''
lowercase__ : Union[str, Any] = bos, unk, pad, eos
lowercase__ : List[str] = []
lowercase__ : Any = []
lowercase__ : Dict = {}
lowercase__ : Tuple = self.add_symbol(__snake_case)
lowercase__ : Any = self.add_symbol(__snake_case)
lowercase__ : Optional[Any] = self.add_symbol(__snake_case)
lowercase__ : Dict = self.add_symbol(__snake_case)
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(__snake_case)
lowercase__ : Tuple = len(self.symbols)
def __eq__( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
return self.indices == other.indices
def __getitem__( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
if idx < len(self.symbols):
return self.symbols[idx]
return self.unk_word
def __len__( self):
'''simple docstring'''
return len(self.symbols)
def __contains__( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
return sym in self.indices
@classmethod
def lowercase__ ( cls , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Dict = cls()
d.add_from_file(__snake_case)
return d
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=False):
'''simple docstring'''
if word in self.indices and not overwrite:
lowercase__ : Union[str, Any] = self.indices[word]
lowercase__ : Tuple = self.count[idx] + n
return idx
else:
lowercase__ : Optional[Any] = len(self.symbols)
lowercase__ : List[Any] = idx
self.symbols.append(__snake_case)
self.count.append(__snake_case)
return idx
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
return 0
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
if isinstance(__snake_case , __snake_case):
try:
with open(__snake_case , """r""" , encoding="""utf-8""") as fd:
self.add_from_file(__snake_case)
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception("""Incorrect encoding detected in {}, please rebuild the dataset""".format(__snake_case))
return
lowercase__ : List[Any] = f.readlines()
lowercase__ : Optional[Any] = self._load_meta(__snake_case)
for line in lines[indices_start_line:]:
try:
lowercase__ : Optional[int] = line.rstrip().rsplit(""" """ , 1)
if field == "#fairseq:overwrite":
lowercase__ : List[str] = True
lowercase__ : Tuple = line.rsplit(""" """ , 1)
else:
lowercase__ : Tuple = False
lowercase__ : int = int(__snake_case)
lowercase__ : Optional[Any] = line
if word in self and not overwrite:
raise RuntimeError(
"""Duplicate word found when loading Dictionary: '{}'. """
"""Duplicate words can overwrite earlier ones by adding the """
"""#fairseq:overwrite flag at the end of the corresponding row """
"""in the dictionary file. If using the Camembert model, please """
"""download an updated copy of the model file.""".format(__snake_case))
self.add_symbol(__snake_case , n=__snake_case , overwrite=__snake_case)
except ValueError:
raise ValueError("""Incorrect dictionary format, expected '<token> <cnt> [flags]'""")
def UpperCamelCase ( lowercase_ ) -> int:
'''simple docstring'''
lowercase__ : Union[str, Any] = dict((re.sub(R"""@@$""" , """""" , SCREAMING_SNAKE_CASE__ ), v) if k.endswith("""@@""" ) else (re.sub(R"""$""" , """</w>""" , SCREAMING_SNAKE_CASE__ ), v) for k, v in d.items() )
lowercase__ : str = """<s> <pad> </s> <unk>""".split()
# restore the special tokens
for k in keep_keys:
del da[F'{k}</w>']
lowercase__ : List[Any] = d[k] # restore
return da
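# A self-contained sketch of the rewriting rule implemented above (the
# function's name is obfuscated to `UpperCamelCase` here; upstream it is
# `rewrite_dict_keys`): fairseq BPE continuation tokens drop their trailing
# "@@", word-final tokens gain a "</w>" marker, and the four special tokens
# are restored verbatim afterwards.
def _rewrite_dict_keys_sketch(d: dict) -> dict:
    da = dict(
        (re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v)
        for k, v in d.items()
    )
    for k in "<s> <pad> </s> <unk>".split():
        del da[f"{k}</w>"]  # the substitution above appended </w> to the special token
        da[k] = d[k]  # restore it verbatim
    return da
# e.g. _rewrite_dict_keys_sketch({"<s>": 0, "hel@@": 4, "lo": 5, "<pad>": 1, "</s>": 2, "<unk>": 3})
# -> {"hel": 4, "lo</w>": 5, "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}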
def UpperCamelCase ( lowercase_ , lowercase_ ) -> Optional[int]:
'''simple docstring'''
if not os.path.exists(SCREAMING_SNAKE_CASE__ ):
raise ValueError(F'path {biogpt_checkpoint_path} does not exist!' )
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
print(F'Writing results to {pytorch_dump_folder_path}' )
# handle various types of models
lowercase__ : Tuple = os.path.join(SCREAMING_SNAKE_CASE__ , """checkpoint.pt""" )
if not os.path.isfile(SCREAMING_SNAKE_CASE__ ):
raise ValueError(F'path to the file {checkpoint_file} does not exist!' )
lowercase__ : str = torch.load(SCREAMING_SNAKE_CASE__ , map_location="""cpu""" )
lowercase__ : List[Any] = chkpt["""cfg"""]["""model"""]
# dicts
lowercase__ : Dict = os.path.join(SCREAMING_SNAKE_CASE__ , """dict.txt""" )
if not os.path.isfile(SCREAMING_SNAKE_CASE__ ):
raise ValueError(F'path to the file {dict_file} does not exist!' )
lowercase__ : Optional[int] = Dictionary.load(SCREAMING_SNAKE_CASE__ )
lowercase__ : Optional[Any] = rewrite_dict_keys(src_dict.indices )
lowercase__ : List[Any] = len(SCREAMING_SNAKE_CASE__ )
lowercase__ : Tuple = os.path.join(SCREAMING_SNAKE_CASE__ , VOCAB_FILES_NAMES["""vocab_file"""] )
print(F'Generating {src_vocab_file} of {src_vocab_size} records' )
with open(SCREAMING_SNAKE_CASE__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ , indent=SCREAMING_SNAKE_CASE__ ) )
# merges_file (bpecodes)
lowercase__ : int = os.path.join(SCREAMING_SNAKE_CASE__ , """bpecodes""" )
if not os.path.isfile(SCREAMING_SNAKE_CASE__ ):
raise ValueError(F'path to the file {bpecodes_file} does not exist!' )
lowercase__ : Optional[Any] = os.path.join(SCREAMING_SNAKE_CASE__ , VOCAB_FILES_NAMES["""merges_file"""] )
shutil.copyfile(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# model config
lowercase__ : int = os.path.join(SCREAMING_SNAKE_CASE__ , """config.json""" )
lowercase__ : Union[str, Any] = {
"""activation_dropout""": args["""activation_dropout"""],
"""architectures""": ["""BioGptForCausalLM"""],
"""attention_probs_dropout_prob""": args["""attention_dropout"""],
"""bos_token_id""": 0,
"""eos_token_id""": 2,
"""hidden_act""": args["""activation_fn"""],
"""hidden_dropout_prob""": args["""dropout"""],
"""hidden_size""": args["""decoder_embed_dim"""],
"""initializer_range""": 0.02,
"""intermediate_size""": args["""decoder_ffn_embed_dim"""],
"""layer_norm_eps""": 1E-12,
"""layerdrop""": args["""decoder_layerdrop"""],
"""max_position_embeddings""": args["""max_target_positions"""],
"""model_type""": """biogpt""",
"""num_attention_heads""": args["""decoder_attention_heads"""],
"""num_hidden_layers""": args["""decoder_layers"""],
"""pad_token_id""": 1,
"""scale_embedding""": not args["""no_scale_embedding"""],
"""tie_word_embeddings""": args["""share_decoder_input_output_embed"""],
"""vocab_size""": src_vocab_size,
}
# good hparam defaults to start with
print(F'Generating {biogpt_model_config_file}' )
with open(SCREAMING_SNAKE_CASE__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ , indent=SCREAMING_SNAKE_CASE__ ) )
# tokenizer config
lowercase__ : Any = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase__ : str = {
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
"""model_max_length""": 10_24,
"""pad_token""": """<pad>""",
"""special_tokens_map_file""": None,
"""tokenizer_class""": """BioGptTokenizer""",
"""unk_token""": """<unk>""",
}
print(F'Generating {biogpt_tokenizer_config_file}' )
with open(SCREAMING_SNAKE_CASE__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ , indent=SCREAMING_SNAKE_CASE__ ) )
# model
lowercase__ : Any = chkpt["""model"""]
# remove unneeded keys
lowercase__ : List[Any] = [
"""decoder.version""",
]
for k in ignore_keys:
model_state_dict.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase__ : List[Any] = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith("""output_projection.weight""" ):
lowercase__ : Optional[Any] = model_state_dict.pop(SCREAMING_SNAKE_CASE__ )
else:
lowercase__ : Dict = model_state_dict.pop(SCREAMING_SNAKE_CASE__ )
lowercase__ : List[str] = BioGptConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
lowercase__ : Tuple = BioGptForCausalLM(SCREAMING_SNAKE_CASE__ )
# check that it loads ok
model_new.load_state_dict(SCREAMING_SNAKE_CASE__ )
# save
lowercase__ : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
print(F'Generating {pytorch_weights_dump_path}' )
torch.save(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
print("""Conversion is done!""" )
if __name__ == "__main__":
lowerCamelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--biogpt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCamelCase__ : Any = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
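# Example invocation (the script name and paths below are placeholders; the
# two flags are the required arguments declared above):
#
#   python convert_biogpt_checkpoint.py \
#       --biogpt_checkpoint_path /path/to/fairseq_biogpt_dump \
#       --pytorch_dump_folder_path /path/to/hf_output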
| 12 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase__ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
A_ : Dict = KandinskyVaaPipeline
A_ : List[str] = [
"""image_embeds""",
"""negative_image_embeds""",
]
A_ : Optional[int] = ["""image_embeds""", """negative_image_embeds"""]
A_ : Optional[Any] = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
A_ : Dict = False
@property
def UpperCAmelCase_ ( self ):
return 32
@property
def UpperCAmelCase_ ( self ):
return 32
@property
def UpperCAmelCase_ ( self ):
return self.time_input_dim
@property
def UpperCAmelCase_ ( self ):
return self.time_input_dim * 4
@property
def UpperCAmelCase_ ( self ):
return 100
@property
def UpperCAmelCase_ ( self ):
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE : Tuple = {
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
_SCREAMING_SNAKE_CASE : Any = UNetaDConditionModel(**__snake_case )
return model
@property
def UpperCAmelCase_ ( self ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCAmelCase_ ( self ):
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE : Tuple = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : int = self.dummy_unet
_SCREAMING_SNAKE_CASE : List[str] = self.dummy_movq
_SCREAMING_SNAKE_CASE : Optional[Any] = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule="""linear""" , beta_start=0.00085 , beta_end=0.012 , clip_sample=__snake_case , set_alpha_to_one=__snake_case , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__snake_case , )
_SCREAMING_SNAKE_CASE : Dict = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def UpperCAmelCase_ ( self , __snake_case , __snake_case=0 ):
_SCREAMING_SNAKE_CASE : int = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__snake_case ) ).to(__snake_case )
_SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__snake_case )
if str(__snake_case ).startswith("""mps""" ):
_SCREAMING_SNAKE_CASE : str = torch.manual_seed(__snake_case )
else:
_SCREAMING_SNAKE_CASE : Dict = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
_SCREAMING_SNAKE_CASE : Tuple = {
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Optional[Any] = """cpu"""
_SCREAMING_SNAKE_CASE : Dict = self.get_dummy_components()
_SCREAMING_SNAKE_CASE : List[Any] = self.pipeline_class(**__snake_case )
_SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
_SCREAMING_SNAKE_CASE : Any = pipe(**self.get_dummy_inputs(__snake_case ) )
_SCREAMING_SNAKE_CASE : Tuple = output.images
_SCREAMING_SNAKE_CASE : Dict = pipe(
**self.get_dummy_inputs(__snake_case ) , return_dict=__snake_case , )[0]
_SCREAMING_SNAKE_CASE : Tuple = image[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_SCREAMING_SNAKE_CASE : Dict = np.array(
[0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : List[str] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy""" )
_SCREAMING_SNAKE_CASE : List[Any] = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__snake_case )
_SCREAMING_SNAKE_CASE : Tuple = KandinskyVaaPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.floataa )
_SCREAMING_SNAKE_CASE : Union[str, Any] = pipeline.to(__snake_case )
pipeline.set_progress_bar_config(disable=__snake_case )
_SCREAMING_SNAKE_CASE : Dict = """red cat, 4k photo"""
_SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device="""cuda""" ).manual_seed(0 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = pipe_prior(
__snake_case , generator=__snake_case , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
_SCREAMING_SNAKE_CASE : Dict = torch.Generator(device="""cuda""" ).manual_seed(0 )
_SCREAMING_SNAKE_CASE : List[str] = pipeline(
image_embeds=__snake_case , negative_image_embeds=__snake_case , generator=__snake_case , num_inference_steps=100 , output_type="""np""" , )
_SCREAMING_SNAKE_CASE : List[Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(__snake_case , __snake_case )
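# A standalone illustration of the seeding pattern used throughout the tests
# above: generators seeded identically produce identical tensors, which is
# what makes the expected-slice assertions deterministic. Requires only torch.
if __name__ == "__main__":
    gen_a = torch.Generator(device="cpu").manual_seed(0)
    gen_b = torch.Generator(device="cpu").manual_seed(0)
    assert torch.equal(
        torch.randn(1, 4, 8, 8, generator=gen_a),
        torch.randn(1, 4, 8, 8, generator=gen_b),
    )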
| 533 | 0 |
import math


def prime_sieve(n: int) -> list[int]:
    """Return all primes below n, using an odd-only sieve of Eratosthenes."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(limit: int = 9999_6666_3333) -> int:
    """Sum the semidivisible numbers up to `limit` (Project Euler 234): the
    numbers divisible by exactly one of lps(n) and ups(n), the primes whose
    squares bracket n from below and above."""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Add the numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Set up for the next prime pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum


if __name__ == "__main__":
    print(solution())
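    # Small-scale sanity checks (cheap to run, unlike the 12-digit default);
    # 34825 is the worked example quoted in the Project Euler 234 statement.
    assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
    assert solution(1000) == 34825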
| 706 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__: Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase__: Dict = {"vocab_file": "spiece.model"}
lowerCAmelCase__: Dict = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
}
}
lowerCAmelCase__: Dict = {
"albert-base-v1": 512,
"albert-large-v1": 512,
"albert-xlarge-v1": 512,
"albert-xxlarge-v1": 512,
"albert-base-v2": 512,
"albert-large-v2": 512,
"albert-xlarge-v2": 512,
"albert-xxlarge-v2": 512,
}
lowerCAmelCase__: List[str] = "▁"
class snake_case_ ( lowerCAmelCase ):
__lowerCamelCase : Optional[int] = VOCAB_FILES_NAMES
__lowerCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=False , __lowerCAmelCase="[CLS]" , __lowerCAmelCase="[SEP]" , __lowerCAmelCase="<unk>" , __lowerCAmelCase="[SEP]" , __lowerCAmelCase="<pad>" , __lowerCAmelCase="[CLS]" , __lowerCAmelCase="[MASK]" , __lowerCAmelCase = None , **__lowerCAmelCase , ):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
SCREAMING_SNAKE_CASE_ : Any = (
AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase , normalized=__lowerCAmelCase )
if isinstance(__lowerCAmelCase , __lowerCAmelCase )
else mask_token
)
SCREAMING_SNAKE_CASE_ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__lowerCAmelCase , remove_space=__lowerCAmelCase , keep_accents=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ : Optional[Any] = do_lower_case
SCREAMING_SNAKE_CASE_ : str = remove_space
SCREAMING_SNAKE_CASE_ : Any = keep_accents
SCREAMING_SNAKE_CASE_ : List[str] = vocab_file
SCREAMING_SNAKE_CASE_ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowerCAmelCase )
@property
def __A ( self ):
return len(self.sp_model )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Any = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
SCREAMING_SNAKE_CASE_ : Any = self.__dict__.copy()
SCREAMING_SNAKE_CASE_ : Optional[int] = None
return state
def __setstate__( self , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Optional[int] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {}
SCREAMING_SNAKE_CASE_ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __A ( self , __lowerCAmelCase ):
if self.remove_space:
SCREAMING_SNAKE_CASE_ : Any = ' '.join(inputs.strip().split() )
else:
SCREAMING_SNAKE_CASE_ : List[str] = inputs
SCREAMING_SNAKE_CASE_ : str = outputs.replace('``' , '"' ).replace('\'\'' , '"' )
if not self.keep_accents:
SCREAMING_SNAKE_CASE_ : Dict = unicodedata.normalize('NFKD' , __lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : str = ''.join([c for c in outputs if not unicodedata.combining(__lowerCAmelCase )] )
if self.do_lower_case:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = outputs.lower()
return outputs
def __A ( self , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : List[Any] = self.preprocess_text(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : str = self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = []
for piece in pieces:
if len(__lowerCAmelCase ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(__lowerCAmelCase , '' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
SCREAMING_SNAKE_CASE_ : Any = cur_pieces[1:]
else:
SCREAMING_SNAKE_CASE_ : Any = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__lowerCAmelCase )
else:
new_pieces.append(__lowerCAmelCase )
return new_pieces
def __A ( self , __lowerCAmelCase ):
return self.sp_model.PieceToId(__lowerCAmelCase )
def __A ( self , __lowerCAmelCase ):
return self.sp_model.IdToPiece(__lowerCAmelCase )
def __A ( self , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : List[Any] = []
SCREAMING_SNAKE_CASE_ : List[Any] = ''
SCREAMING_SNAKE_CASE_ : List[str] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__lowerCAmelCase ) + token
SCREAMING_SNAKE_CASE_ : Dict = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = []
else:
current_sub_tokens.append(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
out_string += self.sp_model.decode(__lowerCAmelCase )
return out_string.strip()
def __A ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
SCREAMING_SNAKE_CASE_ : Optional[int] = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __A ( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase )
if token_ids_a is not None:
return [1] + ([0] * len(__lowerCAmelCase )) + [1] + ([0] * len(__lowerCAmelCase )) + [1]
return [1] + ([0] * len(__lowerCAmelCase )) + [1]
def __A ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __A ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
if not os.path.isdir(__lowerCAmelCase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
SCREAMING_SNAKE_CASE_ : Union[str, Any] = os.path.join(
__lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCAmelCase , 'wb' ) as fi:
SCREAMING_SNAKE_CASE_ : Dict = self.sp_model.serialized_model_proto()
fi.write(__lowerCAmelCase )
return (out_vocab_file,)
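# A short usage sketch via the upstream class name (the class above is
# obfuscated to `snake_case_`); it downloads the published albert-base-v2
# spiece.model listed at the top of this file on first use.
if __name__ == "__main__":
    from transformers import AlbertTokenizer

    tok = AlbertTokenizer.from_pretrained("albert-base-v2")
    print(tok.tokenize("Hello, world!"))  # lowercased SentencePiece pieces, e.g. ['▁hello', ',', '▁world', '!']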
| 311 | 0 |
'''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class UpperCAmelCase_ (_UpperCAmelCase ):
"""simple docstring"""
def lowercase_ ( self ) -> Optional[int]:
__lowerCamelCase : Union[str, Any] = tempfile.mkdtemp()
__lowerCamelCase : Optional[Any] = 5
# Realm tok
__lowerCamelCase : List[str] = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'test',
'question',
'this',
'is',
'the',
'first',
'second',
'third',
'fourth',
'fifth',
'record',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__lowerCamelCase : Optional[Any] = os.path.join(self.tmpdirname , 'realm_tokenizer' )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Any = os.path.join(SCREAMING_SNAKE_CASE_ , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
__lowerCamelCase : str = os.path.join(self.tmpdirname , 'realm_block_records' )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> RealmTokenizer:
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'realm_tokenizer' ) )
def lowercase_ ( self ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self ) -> Any:
__lowerCamelCase : List[str] = RealmConfig(num_block_records=self.num_block_records )
return config
def lowercase_ ( self ) -> Union[str, Any]:
__lowerCamelCase : Optional[Any] = Dataset.from_dict(
{
'id': ['0', '1'],
'question': ['foo', 'bar'],
'answers': [['Foo', 'Bar'], ['Bar']],
} )
return dataset
def lowercase_ ( self ) -> List[str]:
__lowerCamelCase : Optional[int] = np.array(
[
B'This is the first record',
B'This is the second record',
B'This is the third record',
B'This is the fourth record',
B'This is the fifth record',
B'This is a longer longer longer record',
] , dtype=SCREAMING_SNAKE_CASE_ , )
return block_records
def lowercase_ ( self ) -> Optional[Any]:
__lowerCamelCase : Any = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def lowercase_ ( self ) -> str:
__lowerCamelCase : Union[str, Any] = self.get_config()
__lowerCamelCase : int = self.get_dummy_retriever()
__lowerCamelCase : Any = retriever.tokenizer
__lowerCamelCase : List[str] = np.array([0, 3] , dtype='long' )
__lowerCamelCase : Union[str, Any] = tokenizer(['Test question'] ).input_ids
__lowerCamelCase : Optional[Any] = tokenizer(
['the fourth'] , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , ).input_ids
__lowerCamelCase : int = config.reader_seq_len
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Tuple = retriever(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , answer_ids=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors='np' )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'first', 'record', '[SEP]'] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'fourth', 'record', '[SEP]'] , )
def lowercase_ ( self ) -> Dict:
__lowerCamelCase : Optional[Any] = self.get_config()
__lowerCamelCase : Any = self.get_dummy_retriever()
__lowerCamelCase : Optional[int] = retriever.tokenizer
__lowerCamelCase : Any = np.array([0, 3, 5] , dtype='long' )
__lowerCamelCase : Optional[Any] = tokenizer(['Test question'] ).input_ids
__lowerCamelCase : List[str] = tokenizer(
['the fourth', 'longer longer'] , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , ).input_ids
__lowerCamelCase : Union[str, Any] = config.reader_seq_len
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : str = retriever(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , answer_ids=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors='np' )
self.assertEqual([False, True, True] , SCREAMING_SNAKE_CASE_ )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , SCREAMING_SNAKE_CASE_ )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> List[Any]:
__lowerCamelCase : int = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , 'realm_block_records' ) )
# Test local path
__lowerCamelCase : List[Any] = retriever.from_pretrained(os.path.join(self.tmpdirname , 'realm_block_records' ) )
self.assertEqual(retriever.block_records[0] , B'This is the first record' )
# Test mocked remote path
with patch('transformers.models.realm.retrieval_realm.hf_hub_download' ) as mock_hf_hub_download:
__lowerCamelCase : Union[str, Any] = os.path.join(
os.path.join(self.tmpdirname , 'realm_block_records' ) , _REALM_BLOCK_RECORDS_FILENAME )
__lowerCamelCase : str = RealmRetriever.from_pretrained('google/realm-cc-news-pretrained-openqa' )
self.assertEqual(retriever.block_records[0] , B'This is the first record' )
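# A dependency-light sketch of what block lookup amounts to in the retriever
# exercised above: retrieved ids simply index into the numpy array of byte
# records (the records below mirror the dummy data in this test file).
if __name__ == "__main__":
    dummy_records = np.array(
        [b"This is the first record", b"This is the second record"], dtype=object
    )
    retrieved_ids = np.array([1, 0], dtype=np.int64)
    print([dummy_records[i].decode() for i in retrieved_ids])
    # -> ['This is the second record', 'This is the first record']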
| 13 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all BART models at https://huggingface.co/models?filter=bart
UpperCamelCase__ = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
}
UpperCamelCase__ = {
'facebook/bart-base': 1024,
'facebook/bart-large': 1024,
'facebook/bart-large-mnli': 1024,
'facebook/bart-large-cnn': 1024,
'facebook/bart-large-xsum': 1024,
'yjernite/bart_eli5': 1024,
}
@lru_cache()
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
lowercase_ : List[Any] = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
lowercase_ : str = bs[:]
lowercase_ : Dict = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_UpperCamelCase )
cs.append(2**8 + n )
n += 1
lowercase_ : Optional[int] = [chr(_UpperCamelCase ) for n in cs]
return dict(zip(_UpperCamelCase , _UpperCamelCase ) )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Optional[int] = set()
lowercase_ : Union[str, Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowercase_ : Dict = char
return pairs
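# A compact sketch of a single BPE merge step driven by the pair helper above
# (the two helpers here are obfuscated to `__SCREAMING_SNAKE_CASE`; upstream
# they are `bytes_to_unicode` and `get_pairs`). The lowest-ranked adjacent
# pair is fused first; the toy merge table below is illustrative only.
def _bpe_step_sketch(word: tuple, bpe_ranks: dict) -> tuple:
    pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
    best = min(pairs, key=lambda pair: bpe_ranks.get(pair, float("inf")))
    if best not in bpe_ranks:
        return word  # no known merge applies
    first, second = best
    new_word, i = [], 0
    while i < len(word):
        if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
            new_word.append(first + second)  # fuse the winning pair
            i += 2
        else:
            new_word.append(word[i])
            i += 1
    return tuple(new_word)
# e.g. _bpe_step_sketch(("l", "o", "w"), {("l", "o"): 0}) -> ("lo", "w")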
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: Dict = VOCAB_FILES_NAMES
__lowerCamelCase: int = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase: Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase: str = ['input_ids', 'attention_mask']
def __init__( self : Tuple , a : Tuple , a : Tuple , a : int="replace" , a : Optional[int]="<s>" , a : Tuple="</s>" , a : Tuple="</s>" , a : Tuple="<s>" , a : Optional[Any]="<unk>" , a : Dict="<pad>" , a : List[str]="<mask>" , a : Tuple=False , **a : Optional[int] , ):
'''simple docstring'''
lowercase_ : List[Any] = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else bos_token
lowercase_ : str = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else eos_token
lowercase_ : str = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else sep_token
lowercase_ : Any = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else cls_token
lowercase_ : Dict = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else unk_token
lowercase_ : List[Any] = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowercase_ : Union[str, Any] = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token
super().__init__(
errors=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , cls_token=a , pad_token=a , mask_token=a , add_prefix_space=a , **a , )
with open(a , encoding="utf-8" ) as vocab_handle:
lowercase_ : Any = json.load(a )
lowercase_ : Union[str, Any] = {v: k for k, v in self.encoder.items()}
lowercase_ : Dict = errors # how to handle errors in decoding
lowercase_ : Any = bytes_to_unicode()
lowercase_ : List[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(a , encoding="utf-8" ) as merges_handle:
lowercase_ : Optional[Any] = merges_handle.read().split("\n" )[1:-1]
lowercase_ : Any = [tuple(merge.split() ) for merge in bpe_merges]
lowercase_ : List[str] = dict(zip(a , range(len(a ) ) ) )
lowercase_ : Optional[Any] = {}
lowercase_ : Union[str, Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowercase_ : str = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
return len(self.encoder )
def lowerCAmelCase__ ( self : Dict ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase__ ( self : Any , a : int ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowercase_ : Optional[Any] = tuple(a )
lowercase_ : Tuple = get_pairs(a )
if not pairs:
return token
while True:
lowercase_ : Any = min(a , key=lambda a : self.bpe_ranks.get(a , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
lowercase_ , lowercase_ : Dict = bigram
lowercase_ : List[Any] = []
lowercase_ : Optional[Any] = 0
while i < len(a ):
try:
lowercase_ : Union[str, Any] = word.index(a , a )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowercase_ : Any = j
if word[i] == first and i < len(a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase_ : Any = tuple(a )
lowercase_ : List[str] = new_word
if len(a ) == 1:
break
else:
lowercase_ : Union[str, Any] = get_pairs(a )
lowercase_ : List[str] = " ".join(a )
lowercase_ : Optional[int] = word
return word
def lowerCAmelCase__ ( self : Any , a : str ):
'''simple docstring'''
lowercase_ : Dict = []
for token in re.findall(self.pat , a ):
lowercase_ : Tuple = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(a ).split(" " ) )
return bpe_tokens
def lowerCAmelCase__ ( self : Tuple , a : Dict ):
'''simple docstring'''
return self.encoder.get(a , self.encoder.get(self.unk_token ) )
def lowerCAmelCase__ ( self : Tuple , a : str ):
'''simple docstring'''
return self.decoder.get(a )
def lowerCAmelCase__ ( self : int , a : Optional[Any] ):
'''simple docstring'''
lowercase_ : int = "".join(a )
lowercase_ : str = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def lowerCAmelCase__ ( self : List[str] , a : str , a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(a ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase_ : Any = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
lowercase_ : Optional[int] = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(a , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=a , ensure_ascii=a ) + "\n" )
lowercase_ : Dict = 0
with open(a , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda a : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
" Please check that the tokenizer is not corrupted!" )
lowercase_ : Optional[Any] = token_index
writer.write(" ".join(a ) + "\n" )
index += 1
return vocab_file, merge_file
def lowerCAmelCase__ ( self : List[str] , a : List[int] , a : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase_ : List[str] = [self.cls_token_id]
lowercase_ : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCAmelCase__ ( self : int , a : List[int] , a : Optional[List[int]] = None , a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a , token_ids_a=a , already_has_special_tokens=a )
if token_ids_a is None:
return [1] + ([0] * len(a )) + [1]
return [1] + ([0] * len(a )) + [1, 1] + ([0] * len(a )) + [1]
def lowerCAmelCase__ ( self : Optional[int] , a : List[int] , a : Optional[List[int]] = None ):
'''simple docstring'''
lowercase_ : Union[str, Any] = [self.sep_token_id]
lowercase_ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase__ ( self : str , a : Any , a : int=False , **a : List[Any] ):
'''simple docstring'''
lowercase_ : Any = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(a ) > 0 and not text[0].isspace()):
lowercase_ : str = " " + text
return (text, kwargs)
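# A usage sketch via the upstream class name (the class above is obfuscated
# to `_UpperCAmelCase`); it fetches the facebook/bart-base vocab and merges
# files listed at the top of this file on first use.
if __name__ == "__main__":
    from transformers import BartTokenizer

    tok = BartTokenizer.from_pretrained("facebook/bart-base")
    ids = tok("Hello world!").input_ids
    print(ids)  # starts with <s> (0) and ends with </s> (2)
    print(tok.decode(ids, skip_special_tokens=True))  # "Hello world!"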
| 620 | 0 |
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
snake_case = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Any = ['''input_values''', '''padding_mask''']
def __init__( self : Tuple , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : int = 2_4000 , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : float = None , UpperCAmelCase_ : float = None , **UpperCAmelCase_ : Optional[Any] , ):
super().__init__(feature_size=UpperCAmelCase_ , sampling_rate=UpperCAmelCase_ , padding_value=UpperCAmelCase_ , **UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : int = chunk_length_s
SCREAMING_SNAKE_CASE : List[str] = overlap
@property
def _A ( self : Optional[Any] ):
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def _A ( self : List[str] ):
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
def __call__( self : Dict , UpperCAmelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCAmelCase_ : Optional[Union[bool, str, PaddingStrategy]] = None , UpperCAmelCase_ : Optional[bool] = False , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , UpperCAmelCase_ : Optional[int] = None , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
f''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
if padding and truncation:
raise ValueError("Both padding and truncation were set. Make sure you only set one." )
elif padding is None:
# by default let's pad the inputs
SCREAMING_SNAKE_CASE : int = True
SCREAMING_SNAKE_CASE : Tuple = bool(
isinstance(UpperCAmelCase_ , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) )
if is_batched:
SCREAMING_SNAKE_CASE : Any = [np.asarray(UpperCAmelCase_ , dtype=np.floataa ).T for audio in raw_audio]
elif not is_batched and not isinstance(UpperCAmelCase_ , np.ndarray ):
SCREAMING_SNAKE_CASE : Optional[int] = np.asarray(UpperCAmelCase_ , dtype=np.floataa )
elif isinstance(UpperCAmelCase_ , np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE : str = raw_audio.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE : Any = [np.asarray(UpperCAmelCase_ ).T]
# verify inputs are valid
for idx, example in enumerate(UpperCAmelCase_ ):
if example.ndim > 2:
raise ValueError(f'''Expected input shape (channels, length) but got shape {example.shape}''' )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(f'''Expected mono audio but example has {example.shape[-1]} channels''' )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(f'''Expected stereo audio but example has {example.shape[-1]} channels''' )
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : List[Any] = BatchFeature({"input_values": raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
SCREAMING_SNAKE_CASE : Tuple = min(array.shape[0] for array in raw_audio )
SCREAMING_SNAKE_CASE : Dict = int(np.floor(max_length / self.chunk_stride ) )
SCREAMING_SNAKE_CASE : int = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
SCREAMING_SNAKE_CASE : List[Any] = max(array.shape[0] for array in raw_audio )
SCREAMING_SNAKE_CASE : Union[str, Any] = int(np.ceil(max_length / self.chunk_stride ) )
SCREAMING_SNAKE_CASE : int = (nb_step - 1) * self.chunk_stride + self.chunk_length
SCREAMING_SNAKE_CASE : Dict = "max_length"
else:
SCREAMING_SNAKE_CASE : List[str] = input_values
# normal padding on batch
if padded_inputs is None:
SCREAMING_SNAKE_CASE : Tuple = self.pad(
UpperCAmelCase_ , max_length=UpperCAmelCase_ , truncation=UpperCAmelCase_ , padding=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , )
if padding:
SCREAMING_SNAKE_CASE : Optional[Any] = padded_inputs.pop("attention_mask" )
SCREAMING_SNAKE_CASE : List[str] = []
for example in padded_inputs.pop("input_values" ):
if self.feature_size == 1:
SCREAMING_SNAKE_CASE : Optional[Any] = example[..., None]
input_values.append(example.T )
SCREAMING_SNAKE_CASE : Tuple = input_values
if return_tensors is not None:
SCREAMING_SNAKE_CASE : Any = padded_inputs.convert_to_tensors(UpperCAmelCase_ )
return padded_inputs
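# A minimal usage sketch for the feature extractor above. Assumptions, not
# taken from this file: the public class is `EncodecFeatureExtractor` from
# transformers, and the input is 24 kHz mono audio.
import numpy as np
from transformers import EncodecFeatureExtractor

extractor = EncodecFeatureExtractor(
    feature_size=1, sampling_rate=24_000, chunk_length_s=1.0, overlap=0.5
)
audio = np.random.randn(30_000).astype(np.float32)  # ~1.25 s of mono audio
batch = extractor(audio, sampling_rate=24_000, return_tensors="np")
print(batch["input_values"].shape)  # (batch, channels, padded_length)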
| 704 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple=7 , UpperCAmelCase_ : Dict=3 , UpperCAmelCase_ : Any=18 , UpperCAmelCase_ : Optional[int]=30 , UpperCAmelCase_ : Any=400 , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : Any=[0.5, 0.5, 0.5] , UpperCAmelCase_ : Dict=[0.5, 0.5, 0.5] , ):
SCREAMING_SNAKE_CASE : Any = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : Dict = num_channels
SCREAMING_SNAKE_CASE : str = image_size
SCREAMING_SNAKE_CASE : Union[str, Any] = min_resolution
SCREAMING_SNAKE_CASE : Any = max_resolution
SCREAMING_SNAKE_CASE : int = do_resize
SCREAMING_SNAKE_CASE : Union[str, Any] = size if size is not None else {"height": 18, "width": 20}
SCREAMING_SNAKE_CASE : Optional[Any] = do_thumbnail
SCREAMING_SNAKE_CASE : Tuple = do_align_axis
SCREAMING_SNAKE_CASE : List[str] = do_pad
SCREAMING_SNAKE_CASE : Optional[Any] = do_normalize
SCREAMING_SNAKE_CASE : Any = image_mean
SCREAMING_SNAKE_CASE : Any = image_std
def _A ( self : str ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase_ : str = DonutImageProcessor if is_vision_available() else None
def _A ( self : Tuple ):
SCREAMING_SNAKE_CASE : Optional[Any] = DonutImageProcessingTester(self )
@property
def _A ( self : Tuple ):
return self.image_processor_tester.prepare_image_processor_dict()
def _A ( self : List[Any] ):
SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase_ , "do_resize" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , "size" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , "do_thumbnail" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , "do_align_long_axis" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , "do_pad" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , "do_normalize" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , "image_mean" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , "image_std" ) )
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 20} )
SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
# Previous config had dimensions in (width, height) order
SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {"height": 84, "width": 42} )
def _A ( self : Dict ):
pass
@is_flaky()
def _A ( self : Optional[Any] ):
# Initialize image_processing
SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Dict = image_processing(UpperCAmelCase_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
@is_flaky()
def _A ( self : List[str] ):
# Initialize image_processing
SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE : Any = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Optional[int] = image_processing(UpperCAmelCase_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
@is_flaky()
def _A ( self : Dict ):
# Initialize image_processing
SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Union[str, Any] = image_processing(UpperCAmelCase_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
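# Note on the backwards-compatibility assertion above: a tuple `size` is read
# in legacy (width, height) order and converted to the explicit dict form, so
# size=(42, 84) yields {"height": 84, "width": 42}.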
| 488 | 0 |
'''simple docstring'''
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
UpperCamelCase_ = ["""small""", """medium""", """large"""]
UpperCamelCase_ = """lm_head.decoder.weight"""
UpperCamelCase_ = """lm_head.weight"""
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : str ) -> List[str]:
lowercase : List[str] =torch.load(__magic_name__ )
lowercase : List[str] =d.pop(__magic_name__ )
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
torch.save(__magic_name__ , os.path.join(__magic_name__ , __magic_name__ ) )
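# A de-obfuscated sketch of the conversion function the loop below calls. The
# key names come from the constants at the top of the script; WEIGHTS_NAME
# resolves to "pytorch_model.bin" in transformers.
import os
import torch

def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    d = torch.load(checkpoint_path)
    d["lm_head.weight"] = d.pop("lm_head.decoder.weight")  # rename the LM-head key
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, "pytorch_model.bin"))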
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("""--dialogpt_path""", default=""".""", type=str)
UpperCamelCase_ = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
UpperCamelCase_ = os.path.join(args.dialogpt_path, f'''{MODEL}_ft.pkl''')
UpperCamelCase_ = f'''./DialoGPT-{MODEL}'''
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 92 |
class snake_case_ :
'''simple docstring'''
def __init__( self : Tuple , __magic_name__ : Any , __magic_name__ : int , __magic_name__ : List[Any] ) -> Union[str, Any]:
lowerCamelCase_ : Any = name
lowerCamelCase_ : Optional[Any] = value
lowerCamelCase_ : str = weight
def __repr__( self : int ) -> Optional[int]:
return F"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
return self.value
def __SCREAMING_SNAKE_CASE ( self : str ) -> int:
return self.name
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
return self.weight
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
return self.value / self.weight
def __a ( __UpperCAmelCase : Any , __UpperCAmelCase : Tuple , __UpperCAmelCase : Any ) -> int:
"""simple docstring"""
lowerCamelCase_ : int = []
for i in range(len(__UpperCAmelCase ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def __a ( __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Dict ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ : Dict = sorted(__UpperCAmelCase , key=__UpperCAmelCase , reverse=__UpperCAmelCase )
lowerCamelCase_ : List[Any] = []
lowerCamelCase_ , lowerCamelCase_ : Tuple = 0.0, 0.0
for i in range(len(__UpperCAmelCase ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def __a ( ) -> List[str]:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
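# A readable reconstruction of the greedy routine above, as a minimal sketch:
# sort by a caller-supplied key, descending, and take items while they fit.
def greedy(items, max_cost, key_function):
    items_copy = sorted(items, key=key_function, reverse=True)
    result, total_value, total_cost = [], 0.0, 0.0
    for item in items_copy:
        if total_cost + item.get_weight() <= max_cost:
            result.append(item)
            total_cost += item.get_weight()
            total_value += item.get_value()
    return result, total_value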
| 488 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE_ : Optional[Any] = {'configuration_mmbt': ['MMBTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ : Dict = ['MMBTForClassification', 'MMBTModel', 'ModalEmbeddings']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
SCREAMING_SNAKE_CASE_ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 710 |
"""simple docstring"""
from typing import Dict, Optional
import numpy as np
import datasets
SCREAMING_SNAKE_CASE_ : Tuple = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n'
SCREAMING_SNAKE_CASE_ : Optional[int] = '\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric("mean_iou")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n'
SCREAMING_SNAKE_CASE_ : int = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}'
def _snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : bool , UpperCAmelCase_ : Optional[Dict[int, int]] = None , UpperCAmelCase_ : bool = False , ):
if label_map is not None:
for old_id, new_id in label_map.items():
A__ = new_id
# turn into Numpy arrays
A__ = np.array(UpperCAmelCase_ )
A__ = np.array(UpperCAmelCase_ )
if reduce_labels:
A__ = 255
A__ = label - 1
A__ = 255
A__ = label != ignore_index
A__ = np.not_equal(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = pred_label[mask]
A__ = np.array(UpperCAmelCase_ )[mask]
A__ = pred_label[pred_label == label]
A__ = np.histogram(UpperCAmelCase_ , bins=UpperCAmelCase_ , range=(0, num_labels - 1) )[0]
A__ = np.histogram(UpperCAmelCase_ , bins=UpperCAmelCase_ , range=(0, num_labels - 1) )[0]
A__ = np.histogram(UpperCAmelCase_ , bins=UpperCAmelCase_ , range=(0, num_labels - 1) )[0]
A__ = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
def _snake_case ( UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : bool , UpperCAmelCase_ : Optional[Dict[int, int]] = None , UpperCAmelCase_ : bool = False , ):
A__ = np.zeros((num_labels,) , dtype=np.float64 )
A__ = np.zeros((num_labels,) , dtype=np.float64 )
A__ = np.zeros((num_labels,) , dtype=np.float64 )
A__ = np.zeros((num_labels,) , dtype=np.float64 )
for result, gt_seg_map in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
A__ , A__ , A__ , A__ = intersect_and_union(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def _snake_case ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : bool , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[Dict[int, int]] = None , UpperCAmelCase_ : bool = False , ):
A__ , A__ , A__ , A__ = total_intersect_and_union(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# compute metrics
A__ = {}
A__ = total_area_intersect.sum() / total_area_label.sum()
A__ = total_area_intersect / total_area_union
A__ = total_area_intersect / total_area_label
A__ = np.nanmean(UpperCAmelCase_ )
A__ = np.nanmean(UpperCAmelCase_ )
A__ = all_acc
A__ = iou
A__ = acc
if nan_to_num is not None:
A__ = {metric: np.nan_to_num(UpperCAmelCase_ , nan=UpperCAmelCase_ ) for metric, metric_value in metrics.items()}
return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
"""simple docstring"""
def UpperCamelCase ( self: str ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
"""predictions""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16""" ) ) ),
"""references""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16""" ) ) ),
} ) , reference_urls=[
"""https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"""
] , )
def UpperCamelCase ( self: str , UpperCamelCase: Union[str, Any] , UpperCamelCase: Optional[Any] , UpperCamelCase: int , UpperCamelCase: bool , UpperCamelCase: Optional[int] = None , UpperCamelCase: Optional[Dict[int, int]] = None , UpperCamelCase: bool = False , ):
"""simple docstring"""
A__ = mean_iou(
results=UpperCamelCase , gt_seg_maps=UpperCamelCase , num_labels=UpperCamelCase , ignore_index=UpperCamelCase , nan_to_num=UpperCamelCase , label_map=UpperCamelCase , reduce_labels=UpperCamelCase , )
return iou_result
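# A tiny worked example of the per-class intersection/union computed above
# (self-contained; it does not call the helpers in this file): two 2x2 maps.
import numpy as np

pred = np.array([[0, 1], [1, 1]])
gt = np.array([[0, 1], [0, 1]])
ious = []
for c in range(2):  # num_labels = 2
    inter = np.logical_and(pred == c, gt == c).sum()
    union = np.logical_or(pred == c, gt == c).sum()
    ious.append(inter / union)
print(ious)              # [0.5, 0.666...]
print(np.nanmean(ious))  # mean IoU ~= 0.583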
| 500 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase : Dict = {"""configuration_mmbt""": ["""MMBTConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Union[str, Any] = ["""MMBTForClassification""", """MMBTModel""", """ModalEmbeddings"""]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
_lowerCamelCase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
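# The _LazyModule pattern above defers the torch-backed imports until first
# attribute access. Illustration; the exact module path is an assumption and
# has moved between transformers versions:
# import importlib
# mmbt = importlib.import_module("transformers.models.mmbt")  # cheap import
# mmbt.MMBTModel  # first attribute access triggers the real torch import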
| 87 |
import os
import numpy
import onnx
def _lowerCAmelCase ( _lowerCAmelCase ,_lowerCAmelCase ):
'''simple docstring'''
A_ : List[str] = a.name
A_ : int = b.name
A_ : int = """"""
A_ : Union[str, Any] = """"""
A_ : Tuple = a == b
A_ : Optional[Any] = name_a
A_ : int = name_b
return res
def _lowerCAmelCase ( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
'''simple docstring'''
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(_lowerCAmelCase ,_lowerCAmelCase )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g ,_lowerCAmelCase ,_lowerCAmelCase )
_graph_replace_input_with(node_proto.attribute[1].g ,_lowerCAmelCase ,_lowerCAmelCase )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g ,_lowerCAmelCase ,_lowerCAmelCase )
def _lowerCAmelCase ( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
'''simple docstring'''
for n in graph_proto.node:
_node_replace_input_with(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
def _lowerCAmelCase ( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
'''simple docstring'''
A_ : List[Any] = list(model.graph.initializer )
A_ : List[Any] = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
A_ : Optional[int] = inits[i].name
A_ : Any = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph ,_lowerCAmelCase ,_lowerCAmelCase )
def _lowerCAmelCase ( _lowerCAmelCase ):
'''simple docstring'''
A_ : Tuple = os.path.dirname(_lowerCAmelCase )
A_ : int = os.path.basename(_lowerCAmelCase )
A_ : Optional[int] = onnx.load(os.path.join(_lowerCAmelCase ,_lowerCAmelCase ) )
A_ : Union[str, Any] = list(model.graph.initializer )
A_ : Tuple = set()
A_ : Tuple = {}
A_ : Optional[int] = []
A_ : List[Any] = 0
for i in range(len(_lowerCAmelCase ) ):
if i in dup_set:
continue
for j in range(i + 1 ,len(_lowerCAmelCase ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] ,inits[j] ):
dup_set.add(_lowerCAmelCase )
dup_set.add(_lowerCAmelCase )
A_ : Any = inits[j].data_type
A_ : Tuple = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 1_1:
mem_size *= 8
else:
print("""unexpected data type: """ ,_lowerCAmelCase )
total_reduced_size += mem_size
A_ : Optional[int] = inits[i].name
A_ : Optional[int] = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(_lowerCAmelCase )
else:
A_ : Any = [name_j]
ind_to_replace.append((j, i) )
print("""total reduced size: """ ,total_reduced_size / 1_0_2_4 / 1_0_2_4 / 1_0_2_4 ,"""GB""" )
A_ : List[str] = sorted(_lowerCAmelCase )
_remove_dup_initializers_from_model(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
A_ : Optional[Any] = """optimized_""" + model_file_name
A_ : Union[str, Any] = os.path.join(_lowerCAmelCase ,_lowerCAmelCase )
onnx.save(_lowerCAmelCase ,_lowerCAmelCase )
return new_model
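# The core trick in the dedup pass above: two TensorProtos are compared for
# equality after blanking their `name` fields (protobuf messages support
# field-wise `==`). A standalone sketch of that comparison:
import onnx

def tensors_equal(a: onnx.TensorProto, b: onnx.TensorProto) -> bool:
    name_a, name_b = a.name, b.name
    a.name, b.name = "", ""
    same = a == b
    a.name, b.name = name_a, name_b  # restore the original names
    return same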
| 569 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _A ( unittest.TestCase ):
@property
def _lowerCamelCase ( self ) -> str:
torch.manual_seed(0 )
lowerCamelCase__ = UNet2DModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def _lowerCamelCase ( self ) -> Optional[Any]:
lowerCamelCase__ = self.dummy_uncond_unet
lowerCamelCase__ = KarrasVeScheduler()
lowerCamelCase__ = KarrasVePipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = torch.manual_seed(0 )
lowerCamelCase__ = pipe(num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="numpy" ).images
lowerCamelCase__ = torch.manual_seed(0 )
lowerCamelCase__ = pipe(num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="numpy" , return_dict=SCREAMING_SNAKE_CASE__ )[0]
lowerCamelCase__ = image[0, -3:, -3:, -1]
lowerCamelCase__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCamelCase__ = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class _A ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> Tuple:
lowerCamelCase__ = "google/ncsnpp-celebahq-256"
lowerCamelCase__ = UNet2DModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = KarrasVeScheduler()
lowerCamelCase__ = KarrasVePipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = torch.manual_seed(0 )
lowerCamelCase__ = pipe(num_inference_steps=20 , generator=SCREAMING_SNAKE_CASE__ , output_type="numpy" ).images
lowerCamelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
lowerCamelCase__ = np.array([0.5_78, 0.58_11, 0.59_24, 0.58_09, 0.5_87, 0.58_86, 0.58_61, 0.58_02, 0.5_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
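# A minimal inference sketch mirroring the slow test above. KarrasVePipeline
# shipped with older diffusers releases; the checkpoint id comes from the test.
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
image = pipe(num_inference_steps=20, generator=torch.manual_seed(0)).images[0]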
| 718 |
"""simple docstring"""
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _A ( __a , unittest.TestCase ):
__a = PhobertTokenizer
__a = False
def _lowerCamelCase ( self ) -> Any:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase__ = ["T@@", "i", "I", "R@@", "r", "e@@"]
lowerCamelCase__ = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) )
lowerCamelCase__ = ["#version: 0.2", "l à</w>"]
lowerCamelCase__ = {"unk_token": "<unk>"}
lowerCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
for token in vocab_tokens:
fp.write(f'{token} {vocab_tokens[token]}\n' )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(SCREAMING_SNAKE_CASE__ ) )
def _lowerCamelCase ( self , **SCREAMING_SNAKE_CASE__ ) -> Tuple:
kwargs.update(self.special_tokens_map )
return PhobertTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def _lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ) -> Tuple:
lowerCamelCase__ = "Tôi là VinAI Research"
lowerCamelCase__ = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
return input_text, output_text
def _lowerCamelCase ( self ) -> Tuple:
lowerCamelCase__ = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowerCamelCase__ = "Tôi là VinAI Research"
lowerCamelCase__ = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
lowerCamelCase__ = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
print(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokens + [tokenizer.unk_token]
lowerCamelCase__ = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
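# Typical loading path for the tokenizer under test (the hub id below is an
# assumption for illustration):
from transformers import PhobertTokenizer

tok = PhobertTokenizer.from_pretrained("vinai/phobert-base")
print(tok.tokenize("Tôi là VinAI Research"))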
| 274 | 0 |
import sys
import turtle
def _snake_case (pa , pb):
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2
def _snake_case (__lowercase , __lowercase , __lowercase , __lowercase , ):
my_pen.up()
my_pen.goto(vertexa[0] , vertexa[1])
my_pen.down()
my_pen.goto(vertexa[0] , vertexa[1])
my_pen.goto(vertexa[0] , vertexa[1])
my_pen.goto(vertexa[0] , vertexa[1])
if depth == 0:
return
triangle(__lowercase , get_mid(__lowercase , __lowercase) , get_mid(__lowercase , __lowercase) , depth - 1)
triangle(__lowercase , get_mid(__lowercase , __lowercase) , get_mid(__lowercase , __lowercase) , depth - 1)
triangle(__lowercase , get_mid(__lowercase , __lowercase) , get_mid(__lowercase , __lowercase) , depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"""Correct format for using this script: """
"""python fractals.py <int:depth_for_fractal>"""
)
snake_case__ : Tuple = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("""red""")
snake_case__ : Optional[Any] = [(-1_7_5, -1_2_5), (0, 1_7_5), (1_7_5, -1_2_5)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
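# Sanity note for the midpoint helper above: the midpoint of (0, 0) and
# (2, 2) is (1.0, 1.0), and because each recursion level halves the edges, a
# drawing of depth d contains 3**d filled triangles.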
| 23 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class a ( a__ ):
snake_case__ = 42
class a ( a__ , a__ ):
@register_to_config
def __init__( self , _snake_case = 3 , _snake_case = 3 , _snake_case = ("DownEncoderBlock2D",) , _snake_case = ("UpDecoderBlock2D",) , _snake_case = (64,) , _snake_case = 1 , _snake_case = "silu" , _snake_case = 3 , _snake_case = 32 , _snake_case = 2_56 , _snake_case = 32 , _snake_case = None , _snake_case = 0.18_215 , _snake_case = "group" , ):
"""simple docstring"""
super().__init__()
# pass init params to Encoder
lowerCAmelCase = Encoder(
in_channels=_snake_case , out_channels=_snake_case , down_block_types=_snake_case , block_out_channels=_snake_case , layers_per_block=_snake_case , act_fn=_snake_case , norm_num_groups=_snake_case , double_z=_snake_case , )
lowerCAmelCase = vq_embed_dim if vq_embed_dim is not None else latent_channels
lowerCAmelCase = nn.Conv2d(_snake_case , _snake_case , 1 )
lowerCAmelCase = VectorQuantizer(_snake_case , _snake_case , beta=0.25 , remap=_snake_case , sane_index_shape=_snake_case )
lowerCAmelCase = nn.Conv2d(_snake_case , _snake_case , 1 )
# pass init params to Decoder
lowerCAmelCase = Decoder(
in_channels=_snake_case , out_channels=_snake_case , up_block_types=_snake_case , block_out_channels=_snake_case , layers_per_block=_snake_case , act_fn=_snake_case , norm_num_groups=_snake_case , norm_type=_snake_case , )
@apply_forward_hook
def UpperCamelCase__ ( self , _snake_case , _snake_case = True ):
"""simple docstring"""
lowerCAmelCase = self.encoder(_snake_case )
lowerCAmelCase = self.quant_conv(_snake_case )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=_snake_case )
@apply_forward_hook
def UpperCamelCase__ ( self , _snake_case , _snake_case = False , _snake_case = True ):
"""simple docstring"""
if not force_not_quantize:
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = self.quantize(_snake_case )
else:
lowerCAmelCase = h
lowerCAmelCase = self.post_quant_conv(_snake_case )
lowerCAmelCase = self.decoder(_snake_case , quant if self.config.norm_type == 'spatial' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_snake_case )
def UpperCamelCase__ ( self , _snake_case , _snake_case = True ):
"""simple docstring"""
lowerCAmelCase = sample
lowerCAmelCase = self.encode(_snake_case ).latents
lowerCAmelCase = self.decode(_snake_case ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=_snake_case )
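# A round-trip sketch for the VQ autoencoder above. Assumption: this is the
# class published in diffusers as `VQModel` (identifiers here are mangled).
import torch
from diffusers import VQModel

model = VQModel()                     # defaults match the config args above
x = torch.randn(1, 3, 32, 32)
latents = model.encode(x).latents     # pre-quantization latent code
recon = model.decode(latents).sample  # quantize, then decode back to pixels
print(recon.shape)                    # torch.Size([1, 3, 32, 32])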
| 4 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A : Optional[int] = {
'configuration_xmod': [
'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XmodConfig',
'XmodOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Tuple = [
'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST',
'XmodForCausalLM',
'XmodForMaskedLM',
'XmodForMultipleChoice',
'XmodForQuestionAnswering',
'XmodForSequenceClassification',
'XmodForTokenClassification',
'XmodModel',
'XmodPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
A : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 715 |
"""simple docstring"""
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class lowerCAmelCase :
'''simple docstring'''
def __init__( self :List[str] , lowerCamelCase_ :List[Any] ) -> Dict:
"""simple docstring"""
UpperCamelCase__ = data
UpperCamelCase__ = [0X6745_2301, 0Xefcd_ab89, 0X98ba_dcfe, 0X1032_5476, 0Xc3d2_e1f0]
@staticmethod
def lowerCamelCase__ ( lowerCamelCase_ :Dict , lowerCamelCase_ :Tuple ) -> int:
"""simple docstring"""
return ((n << b) | (n >> (3_2 - b))) & 0Xffff_ffff
def lowerCamelCase__ ( self :List[str] ) -> str:
"""simple docstring"""
UpperCamelCase__ = b"\x80" + b"\x00" * (6_3 - (len(self.data ) + 8) % 6_4)
UpperCamelCase__ = self.data + padding + struct.pack(">Q" , 8 * len(self.data ) )
return padded_data
def lowerCamelCase__ ( self :Optional[Any] ) -> Any:
"""simple docstring"""
return [
self.padded_data[i : i + 6_4] for i in range(0 , len(self.padded_data ) , 6_4 )
]
def lowerCamelCase__ ( self :Optional[int] , lowerCamelCase_ :Dict ) -> Any:
"""simple docstring"""
UpperCamelCase__ = list(struct.unpack(">16L" , lowerCamelCase_ ) ) + [0] * 6_4
for i in range(1_6 , 8_0 ):
UpperCamelCase__ = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 1_4] ^ w[i - 1_6]) , 1 )
return w
def lowerCamelCase__ ( self :Union[str, Any] ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ = self.padding()
UpperCamelCase__ = self.split_blocks()
for block in self.blocks:
UpperCamelCase__ = self.expand_block(lowerCamelCase_ )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.h
for i in range(0 , 8_0 ):
if 0 <= i < 2_0:
UpperCamelCase__ = (b & c) | ((~b) & d)
UpperCamelCase__ = 0X5a82_7999
elif 2_0 <= i < 4_0:
UpperCamelCase__ = b ^ c ^ d
UpperCamelCase__ = 0X6ed9_eba1
elif 4_0 <= i < 6_0:
UpperCamelCase__ = (b & c) | (b & d) | (c & d)
UpperCamelCase__ = 0X8f1b_bcdc
elif 6_0 <= i < 8_0:
UpperCamelCase__ = b ^ c ^ d
UpperCamelCase__ = 0Xca62_c1d6
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = (
self.rotate(lowerCamelCase_ , 5 ) + f + e + k + expanded_block[i] & 0Xffff_ffff,
a,
self.rotate(lowerCamelCase_ , 3_0 ),
c,
d,
)
UpperCamelCase__ = (
self.h[0] + a & 0Xffff_ffff,
self.h[1] + b & 0Xffff_ffff,
self.h[2] + c & 0Xffff_ffff,
self.h[3] + d & 0Xffff_ffff,
self.h[4] + e & 0Xffff_ffff,
)
return ("{:08x}" * 5).format(*self.h )
def snake_case__ ( ):
"""simple docstring"""
UpperCamelCase__ = b"Test String"
assert SHAaHash(_snake_case ).final_hash() == hashlib.sha1(_snake_case ).hexdigest() # noqa: S324
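# Two quick sanity checks for the implementation above, as a sketch: the
# 32-bit left-rotate wraps the top bit around, and the final digest should
# agree with hashlib's reference SHA-1.
import hashlib

def rotate(n: int, b: int) -> int:
    return ((n << b) | (n >> (32 - b))) & 0xFFFF_FFFF

assert rotate(0x8000_0000, 1) == 1  # the top bit wraps around to bit 0
print(hashlib.sha1(b"Test String").hexdigest())  # reference digest to match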
def snake_case__ ( ):
"""simple docstring"""
UpperCamelCase__ = argparse.ArgumentParser(description="Process some strings or files" )
parser.add_argument(
"--string" , dest="input_string" , default="Hello World!! Welcome to Cryptography" , help="Hash the string" , )
parser.add_argument("--file" , dest="input_file" , help="Hash contents of a file" )
UpperCamelCase__ = parser.parse_args()
UpperCamelCase__ = args.input_string
# In any case hash input should be a bytestring
if args.input_file:
with open(args.input_file , "rb" ) as f:
UpperCamelCase__ = f.read()
else:
UpperCamelCase__ = bytes(_snake_case , "utf-8" )
print(SHAaHash(_snake_case ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
| 304 | 0 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
__magic_name__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
__UpperCAmelCase : Dict = ['''pixel_values''']
def __init__( self , a_ = True , a_ = None , a_ = PILImageResampling.BILINEAR , a_ = True , a_ = 1 / 255 , a_ = True , a_ = None , a_ = True , **a_ , ):
super().__init__(**a_ )
lowerCamelCase_ : Optional[int] = size if size is not None else {"shortest_edge": 224}
lowerCamelCase_ : str = get_size_dict(a_ , default_to_square=a_ )
lowerCamelCase_ : Any = crop_size if crop_size is not None else {"height": 256, "width": 256}
lowerCamelCase_ : Tuple = get_size_dict(a_ , param_name="crop_size" )
lowerCamelCase_ : Optional[Any] = do_resize
lowerCamelCase_ : List[str] = size
lowerCamelCase_ : List[Any] = resample
lowerCamelCase_ : List[str] = do_rescale
lowerCamelCase_ : Optional[int] = rescale_factor
lowerCamelCase_ : str = do_center_crop
lowerCamelCase_ : str = crop_size
lowerCamelCase_ : List[Any] = do_flip_channel_order
def _UpperCamelCase ( self , a_ , a_ , a_ = PIL.Image.BILINEAR , a_ = None , **a_ , ):
lowerCamelCase_ : Optional[Any] = get_size_dict(a_ , default_to_square=a_ )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""" )
lowerCamelCase_ : Dict = get_resize_output_image_size(a_ , size=size["shortest_edge"] , default_to_square=a_ )
return resize(a_ , size=a_ , resample=a_ , data_format=a_ , **a_ )
def _UpperCamelCase ( self , a_ , a_ , a_ = None , **a_ , ):
lowerCamelCase_ : str = get_size_dict(a_ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(a_ , size=(size["height"], size["width"]) , data_format=a_ , **a_ )
def _UpperCamelCase ( self , a_ , a_ , a_ = None , **a_ , ):
return rescale(a_ , scale=a_ , data_format=a_ , **a_ )
def _UpperCamelCase ( self , a_ , a_ = None ):
return flip_channel_order(a_ , data_format=a_ )
def _UpperCamelCase ( self , a_ , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = ChannelDimension.FIRST , **a_ , ):
lowerCamelCase_ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
lowerCamelCase_ : List[str] = resample if resample is not None else self.resample
lowerCamelCase_ : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase_ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase_ : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCamelCase_ : List[Any] = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
lowerCamelCase_ : int = size if size is not None else self.size
lowerCamelCase_ : List[Any] = get_size_dict(a_ , default_to_square=a_ )
lowerCamelCase_ : Tuple = crop_size if crop_size is not None else self.crop_size
lowerCamelCase_ : Optional[Any] = get_size_dict(a_ , param_name="crop_size" )
lowerCamelCase_ : Optional[Any] = make_list_of_images(a_ )
if not valid_images(a_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
# All transformations expect numpy arrays.
lowerCamelCase_ : List[str] = [to_numpy_array(a_ ) for image in images]
if do_resize:
lowerCamelCase_ : List[Any] = [self.resize(image=a_ , size=a_ , resample=a_ ) for image in images]
if do_center_crop:
lowerCamelCase_ : Optional[Any] = [self.center_crop(image=a_ , size=a_ ) for image in images]
if do_rescale:
lowerCamelCase_ : Tuple = [self.rescale(image=a_ , scale=a_ ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
lowerCamelCase_ : Tuple = [self.flip_channel_order(image=a_ ) for image in images]
lowerCamelCase_ : List[Any] = [to_channel_dimension_format(a_ , a_ ) for image in images]
lowerCamelCase_ : str = {"pixel_values": images}
return BatchFeature(data=a_ , tensor_type=a_ )
def _UpperCamelCase ( self , a_ , a_ = None ):
lowerCamelCase_ : List[Any] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(a_ ) != len(a_ ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(a_ ):
lowerCamelCase_ : int = target_sizes.numpy()
lowerCamelCase_ : List[Any] = []
for idx in range(len(a_ ) ):
lowerCamelCase_ : Optional[int] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=a_ )
lowerCamelCase_ : str = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(a_ )
else:
lowerCamelCase_ : Union[str, Any] = logits.argmax(dim=1 )
lowerCamelCase_ : Optional[int] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
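# End-to-end usage sketch for the post-processing method above. The model
# class and checkpoint id are assumptions for illustration:
import requests
from PIL import Image
from transformers import MobileViTForSemanticSegmentation, MobileViTImageProcessor

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-small")
model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-small")
outputs = model(**processor(images=image, return_tensors="pt"))
seg = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
print(seg.shape)  # (height, width) label map at the original resolution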
| 250 |
import os
def __magic_name__ ( lowerCAmelCase_ = "input.txt"):
'''simple docstring'''
with open(os.path.join(os.path.dirname(lowerCAmelCase_) , lowerCAmelCase_)) as input_file:
lowerCamelCase_ : Dict = [
[int(lowerCAmelCase_) for element in line.split(",")]
for line in input_file.readlines()
]
lowerCamelCase_ : str = len(lowerCAmelCase_)
lowerCamelCase_ : Any = len(matrix[0])
lowerCamelCase_ : Optional[Any] = [[-1 for _ in range(lowerCAmelCase_)] for _ in range(lowerCAmelCase_)]
for i in range(lowerCAmelCase_):
lowerCamelCase_ : Union[str, Any] = matrix[i][0]
for j in range(1 , lowerCAmelCase_):
for i in range(lowerCAmelCase_):
lowerCamelCase_ : List[Any] = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , lowerCAmelCase_):
lowerCamelCase_ : Any = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j])
for i in range(rows - 2 , -1 , -1):
lowerCamelCase_ : Optional[int] = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j])
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
if __name__ == "__main__":
print(f'''{solution() = }''')
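# A compact reconstruction of the dynamic programme above, as a sketch under
# Project Euler 82 semantics: start anywhere in the first column, move
# up/down/right, and minimise the sum on reaching the last column.
def minimal_path_sum(matrix):
    sums = [row[0] for row in matrix]
    for j in range(1, len(matrix[0])):
        sums = [sums[i] + matrix[i][j] for i in range(len(matrix))]
        for i in range(1, len(matrix)):            # relax moving downwards
            sums[i] = min(sums[i], sums[i - 1] + matrix[i][j])
        for i in range(len(matrix) - 2, -1, -1):   # relax moving upwards
            sums[i] = min(sums[i], sums[i + 1] + matrix[i][j])
    return min(sums)

assert minimal_path_sum([[1, 100], [2, 3]]) == 5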
| 250 | 1 |
'''simple docstring'''
def __UpperCamelCase ( _UpperCAmelCase, _UpperCAmelCase ):
return "\n".join(
F"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
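# Expected output for the call above (first three of the ten lines):
# 5 * 1 = 5
# 5 * 2 = 10
# 5 * 3 = 15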
| 329 |
'''simple docstring'''
lowerCAmelCase__ : Any = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
lowerCAmelCase__ : int = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
lowerCAmelCase__ : List[Any] = {
0: "Sunday",
1: "Monday",
2: "Tuesday",
3: "Wednesday",
4: "Thursday",
5: "Friday",
6: "Saturday",
}
def __UpperCamelCase ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
assert len(str(_UpperCAmelCase ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 12, "month should be between 1 to 12"
assert 1 <= day <= 31, "day should be between 1 to 31"
# Doomsday algorithm:
__UpperCAmelCase : Optional[int] = year // 100
__UpperCAmelCase : int = (5 * (century % 4) + 2) % 7
__UpperCAmelCase : Optional[Any] = year % 100
__UpperCAmelCase : int = centurian % 12
__UpperCAmelCase : Optional[Any] = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
__UpperCAmelCase : Union[str, Any] = (
DOOMSDAY_NOT_LEAP[month - 1]
if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
else DOOMSDAY_LEAP[month - 1]
)
__UpperCAmelCase : Optional[Any] = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
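# Worked example: 13 August 2017 was a Sunday. 2017 is not a leap year, so
# DOOMSDAY_NOT_LEAP applies:
#   century = 20           -> century_anchor = (5 * (20 % 4) + 2) % 7 = 2
#   centurian = 17, m = 5  -> dooms_day = (1 + 5 + 1 + 2) % 7 = 2
#   day_anchor = DOOMSDAY_NOT_LEAP[8 - 1] = 1
#   week_day = (2 + 13 - 1) % 7 = 0 -> "Sunday"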
| 329 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
snake_case_ = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = ['''ReformerTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = ['''ReformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = [
'''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ReformerAttention''',
'''ReformerForMaskedLM''',
'''ReformerForQuestionAnswering''',
'''ReformerForSequenceClassification''',
'''ReformerLayer''',
'''ReformerModel''',
'''ReformerModelWithLMHead''',
'''ReformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
snake_case_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 164 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
'''Visual-Attention-Network/van-base''': (
'''https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE__ (__snake_case ):
__lowerCamelCase : Optional[Any] = """van"""
def __init__( self , a=224 , a=3 , a=[7, 3, 3, 3] , a=[4, 2, 2, 2] , a=[64, 128, 320, 512] , a=[3, 3, 12, 3] , a=[8, 8, 4, 4] , a="gelu" , a=0.02 , a=1e-6 , a=1e-2 , a=0.0 , a=0.0 , **a , ):
super().__init__(**a)
lowercase__ : Any = image_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : Optional[Any] = patch_sizes
lowercase__ : str = strides
lowercase__ : List[Any] = hidden_sizes
lowercase__ : str = depths
lowercase__ : int = mlp_ratios
lowercase__ : Tuple = hidden_act
lowercase__ : List[str] = initializer_range
lowercase__ : Tuple = layer_norm_eps
lowercase__ : str = layer_scale_init_value
lowercase__ : Tuple = drop_path_rate
lowercase__ : Tuple = dropout_rate
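# Usage sketch: in transformers this configuration is published as
# `VanConfig` (an assumption here, since identifiers in this file are
# mangled):
# from transformers import VanConfig, VanModel
# config = VanConfig(hidden_sizes=[64, 128, 320, 512], depths=[3, 3, 12, 3])
# model = VanModel(config)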
| 164 | 1 |
'''simple docstring'''
_SCREAMING_SNAKE_CASE = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
_SCREAMING_SNAKE_CASE = [{'type': 'code', 'content': INSTALL_CONTENT}]
_SCREAMING_SNAKE_CASE = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 700 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE = {
'configuration_jukebox': [
'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP',
'JukeboxConfig',
'JukeboxPriorConfig',
'JukeboxVQVAEConfig',
],
'tokenization_jukebox': ['JukeboxTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'JukeboxModel',
'JukeboxPreTrainedModel',
'JukeboxVQVAE',
'JukeboxPrior',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 83 | 0 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
UpperCAmelCase_ : Tuple = logging.get_logger(__name__)
class lowerCamelCase_ ( _lowercase ):
_lowercase : Union[str, Any] = ['''pixel_values''']
def __init__( self : Any , __A : bool = True , __A : Union[int, float] = 1 / 255 , __A : bool = True , __A : int = 8 , **__A : Union[str, Any] , ):
super().__init__(**__A )
__A : List[str] = do_rescale
__A : Optional[int] = rescale_factor
__A : Dict = do_pad
__A : Any = pad_size
def lowerCAmelCase_ ( self : Optional[int] , __A : np.ndarray , __A : float , __A : Optional[Union[str, ChannelDimension]] = None , **__A : str ):
return rescale(__A , scale=__A , data_format=__A , **__A )
def lowerCAmelCase_ ( self : Tuple , __A : np.ndarray , __A : int , __A : Optional[Union[str, ChannelDimension]] = None ):
__A , __A : Any = get_image_size(__A )
__A : str = (old_height // size + 1) * size - old_height
__A : Optional[Any] = (old_width // size + 1) * size - old_width
return pad(__A , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=__A )
def lowerCAmelCase_ ( self : Dict , __A : ImageInput , __A : Optional[bool] = None , __A : Optional[float] = None , __A : Optional[bool] = None , __A : Optional[int] = None , __A : Optional[Union[str, TensorType]] = None , __A : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__A : Optional[int] , ):
__A : Dict = do_rescale if do_rescale is not None else self.do_rescale
__A : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
__A : Union[str, Any] = do_pad if do_pad is not None else self.do_pad
__A : Dict = pad_size if pad_size is not None else self.pad_size
__A : Tuple = make_list_of_images(__A )
if not valid_images(__A ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
# All transformations expect numpy arrays.
__A : Optional[int] = [to_numpy_array(__A ) for image in images]
if do_rescale:
__A : Tuple = [self.rescale(image=__A , scale=__A ) for image in images]
if do_pad:
__A : int = [self.pad(__A , size=__A ) for image in images]
__A : Optional[int] = [to_channel_dimension_format(__A , __A ) for image in images]
__A : List[Any] = {"""pixel_values""": images}
return BatchFeature(data=__A , tensor_type=__A )
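# The symmetric pad above grows each edge to the next multiple of `pad_size`
# (a Swin2SR-style constraint). A quick check of the size arithmetic:
def pad_amount(old: int, size: int) -> int:
    return (old // size + 1) * size - old

assert pad_amount(17, 8) == 7   # 17 -> 24
assert pad_amount(16, 8) == 8   # exact multiples still gain a full block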
| 17 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class lowerCamelCase_ ( unittest.TestCase ):
def __init__( self : Optional[int] , __A : Union[str, Any] , __A : int=7 , __A : int=3 , __A : int=30 , __A : Dict=400 , __A : str=True , __A : str=None , __A : str=True , __A : Optional[int]=[0.5, 0.5, 0.5] , __A : List[str]=[0.5, 0.5, 0.5] , __A : Optional[Any]=True , __A : int=1 / 255 , __A : List[Any]=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__A : Union[str, Any] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
__A : Union[str, Any] = parent
__A : Union[str, Any] = batch_size
__A : Union[str, Any] = num_channels
__A : Optional[Any] = min_resolution
__A : Union[str, Any] = max_resolution
__A : Any = do_resize
__A : Union[str, Any] = size
__A : Optional[int] = do_normalize
__A : Dict = image_mean
__A : Optional[int] = image_std
__A : Tuple = do_rescale
__A : Optional[Any] = rescale_factor
__A : Tuple = do_pad
def lowerCAmelCase_ ( self : Any ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowerCAmelCase_ ( self : Optional[Any] , __A : Optional[int] , __A : Dict=False ):
if not batched:
__A : Union[str, Any] = image_inputs[0]
if isinstance(__A , Image.Image ):
__A , __A : Union[str, Any] = image.size
else:
__A , __A : Optional[int] = image.shape[1], image.shape[2]
if w < h:
__A : Optional[int] = int(self.size["""shortest_edge"""] * h / w )
__A : Dict = self.size["""shortest_edge"""]
elif w > h:
__A : Optional[Any] = self.size["""shortest_edge"""]
__A : List[Any] = int(self.size["""shortest_edge"""] * w / h )
else:
__A : Union[str, Any] = self.size["""shortest_edge"""]
__A : str = self.size["""shortest_edge"""]
else:
__A : Any = []
for image in image_inputs:
__A , __A : List[str] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__A : Tuple = max(__A , key=lambda __A : item[0] )[0]
__A : Union[str, Any] = max(__A , key=lambda __A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowerCamelCase_ ( _lowercase , unittest.TestCase ):
_lowercase : Tuple = DetaImageProcessor if is_vision_available() else None
def lowerCAmelCase_ ( self : Optional[Any] ):
__A : Tuple = DetaImageProcessingTester(self )
@property
def lowerCAmelCase_ ( self : List[str] ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase_ ( self : List[str] ):
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , """image_mean""" ) )
self.assertTrue(hasattr(__A , """image_std""" ) )
self.assertTrue(hasattr(__A , """do_normalize""" ) )
self.assertTrue(hasattr(__A , """do_resize""" ) )
self.assertTrue(hasattr(__A , """do_rescale""" ) )
self.assertTrue(hasattr(__A , """do_pad""" ) )
self.assertTrue(hasattr(__A , """size""" ) )
def lowerCAmelCase_ ( self : Any ):
__A : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , __A )
def lowerCAmelCase_ ( self : Optional[Any] ):
pass
def lowerCAmelCase_ ( self : Optional[int] ):
# Initialize image_processing
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
__A : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__A , __A : int = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A , __A : List[Any] = self.image_processor_tester.get_expected_values(__A , batched=__A )
__A : List[str] = image_processing(__A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase_ ( self : Optional[int] ):
# Initialize image_processing
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
__A : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__A , __A : Optional[int] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : int = image_processing(__A , return_tensors="""pt""" ).pixel_values
__A , __A : Optional[Any] = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase_ ( self : Dict ):
# Initialize image_processing
__A : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
__A : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__A , __A : Optional[int] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : Tuple = image_processing(__A , return_tensors="""pt""" ).pixel_values
__A , __A : Any = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowerCAmelCase_ ( self : Tuple ):
# prepare image and target
__A : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
__A : Any = json.loads(f.read() )
__A : int = {"""image_id""": 3_9769, """annotations""": target}
# encode them
__A : List[str] = DetaImageProcessor()
__A : List[str] = image_processing(images=__A , annotations=__A , return_tensors="""pt""" )
# verify pixel values
__A : Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __A )
__A : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
__A : Tuple = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __A ) )
# verify boxes
__A : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __A )
__A : List[str] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __A , atol=1e-3 ) )
# verify image_id
__A : List[str] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __A ) )
# verify is_crowd
__A : Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __A ) )
# verify class_labels
__A : List[Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __A ) )
# verify orig_size
__A : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __A ) )
# verify size
__A : Any = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __A ) )
@slow
def lowerCAmelCase_ ( self : Optional[int] ):
# prepare image, target and masks_path
__A : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
__A : Tuple = json.loads(f.read() )
__A : Optional[int] = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target}
__A : Any = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
__A : Any = DetaImageProcessor(format="""coco_panoptic""" )
__A : int = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors="""pt""" )
# verify pixel values
__A : Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __A )
__A : Optional[Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
__A : Union[str, Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __A ) )
# verify boxes
__A : Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __A )
__A : Union[str, Any] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __A , atol=1e-3 ) )
# verify image_id
__A : Any = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __A ) )
# verify is_crowd
__A : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __A ) )
# verify class_labels
__A : Optional[Any] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __A ) )
# verify masks
__A : List[str] = 82_2873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __A )
# verify orig_size
__A : Tuple = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __A ) )
# verify size
__A : Any = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __A ) )
| 17 | 1 |
"""simple docstring"""
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class __lowerCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=0.0 , __UpperCAmelCase = None , __UpperCAmelCase = "geglu" , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = True , __UpperCAmelCase = "layer_norm" , __UpperCAmelCase = False , ) -> Optional[Any]:
super().__init__()
_a = only_cross_attention
_a = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm_zero'''
_a = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm'''
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
F'`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'
F' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
_a = AdaLayerNorm(__UpperCAmelCase , __UpperCAmelCase )
elif self.use_ada_layer_norm_zero:
_a = AdaLayerNormZero(__UpperCAmelCase , __UpperCAmelCase )
else:
_a = nn.LayerNorm(__UpperCAmelCase , elementwise_affine=__UpperCAmelCase )
_a = Attention(
query_dim=__UpperCAmelCase , heads=__UpperCAmelCase , dim_head=__UpperCAmelCase , dropout=__UpperCAmelCase , bias=__UpperCAmelCase , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=__UpperCAmelCase , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
_a = (
AdaLayerNorm(__UpperCAmelCase , __UpperCAmelCase )
if self.use_ada_layer_norm
else nn.LayerNorm(__UpperCAmelCase , elementwise_affine=__UpperCAmelCase )
)
_a = Attention(
query_dim=__UpperCAmelCase , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=__UpperCAmelCase , dim_head=__UpperCAmelCase , dropout=__UpperCAmelCase , bias=__UpperCAmelCase , upcast_attention=__UpperCAmelCase , ) # is self-attn if encoder_hidden_states is none
else:
_a = None
_a = None
# 3. Feed-forward
_a = nn.LayerNorm(__UpperCAmelCase , elementwise_affine=__UpperCAmelCase )
_a = FeedForward(__UpperCAmelCase , dropout=__UpperCAmelCase , activation_fn=__UpperCAmelCase , final_dropout=__UpperCAmelCase )
# let chunk size default to None
_a = None
_a = 0
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
# Sets chunk feed-forward
_a = chunk_size
_a = dim
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , ) -> Tuple:
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
_a = self.norma(__UpperCAmelCase , __UpperCAmelCase )
elif self.use_ada_layer_norm_zero:
_a , _a , _a , _a , _a = self.norma(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , hidden_dtype=hidden_states.dtype )
else:
_a = self.norma(__UpperCAmelCase )
_a = cross_attention_kwargs if cross_attention_kwargs is not None else {}
_a = self.attna(
__UpperCAmelCase , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=__UpperCAmelCase , **__UpperCAmelCase , )
if self.use_ada_layer_norm_zero:
_a = gate_msa.unsqueeze(1 ) * attn_output
_a = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
_a = (
self.norma(__UpperCAmelCase , __UpperCAmelCase ) if self.use_ada_layer_norm else self.norma(__UpperCAmelCase )
)
_a = self.attna(
__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , attention_mask=__UpperCAmelCase , **__UpperCAmelCase , )
_a = attn_output + hidden_states
# 3. Feed-forward
_a = self.norma(__UpperCAmelCase )
if self.use_ada_layer_norm_zero:
_a = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
F'`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.' )
_a = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
_a = torch.cat(
[self.ff(__UpperCAmelCase ) for hid_slice in norm_hidden_states.chunk(__UpperCAmelCase , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
_a = self.ff(__UpperCAmelCase )
if self.use_ada_layer_norm_zero:
_a = gate_mlp.unsqueeze(1 ) * ff_output
_a = ff_output + hidden_states
return hidden_states
class __lowerCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = 4 , __UpperCAmelCase = 0.0 , __UpperCAmelCase = "geglu" , __UpperCAmelCase = False , ) -> List[Any]:
super().__init__()
_a = int(dim * mult )
_a = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
_a = GELU(__UpperCAmelCase , __UpperCAmelCase )
if activation_fn == "gelu-approximate":
_a = GELU(__UpperCAmelCase , __UpperCAmelCase , approximate='''tanh''' )
elif activation_fn == "geglu":
_a = GEGLU(__UpperCAmelCase , __UpperCAmelCase )
elif activation_fn == "geglu-approximate":
_a = ApproximateGELU(__UpperCAmelCase , __UpperCAmelCase )
_a = nn.ModuleList([] )
# project in
self.net.append(__UpperCAmelCase )
# project dropout
self.net.append(nn.Dropout(__UpperCAmelCase ) )
# project out
self.net.append(nn.Linear(__UpperCAmelCase , __UpperCAmelCase ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(__UpperCAmelCase ) )
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Optional[int]:
for module in self.net:
_a = module(__UpperCAmelCase )
return hidden_states
class __lowerCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = "none" ) -> Union[str, Any]:
super().__init__()
_a = nn.Linear(__UpperCAmelCase , __UpperCAmelCase )
_a = approximate
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Optional[Any]:
if gate.device.type != "mps":
return F.gelu(__UpperCAmelCase , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Tuple:
_a = self.proj(__UpperCAmelCase )
_a = self.gelu(__UpperCAmelCase )
return hidden_states
class __lowerCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
super().__init__()
_a = nn.Linear(__UpperCAmelCase , dim_out * 2 )
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> List[Any]:
if gate.device.type != "mps":
return F.gelu(__UpperCAmelCase )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Optional[Any]:
_a , _a = self.proj(__UpperCAmelCase ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(__UpperCAmelCase )
class __lowerCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
super().__init__()
_a = nn.Linear(__UpperCAmelCase , __UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> str:
_a = self.proj(__UpperCAmelCase )
return x * torch.sigmoid(1.702 * x )
class __lowerCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
super().__init__()
_a = nn.Embedding(__UpperCAmelCase , __UpperCAmelCase )
_a = nn.SiLU()
_a = nn.Linear(__UpperCAmelCase , embedding_dim * 2 )
_a = nn.LayerNorm(__UpperCAmelCase , elementwise_affine=__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> int:
_a = self.linear(self.silu(self.emb(__UpperCAmelCase ) ) )
_a , _a = torch.chunk(__UpperCAmelCase , 2 )
_a = self.norm(__UpperCAmelCase ) * (1 + scale) + shift
return x
class __lowerCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
super().__init__()
_a = CombinedTimestepLabelEmbeddings(__UpperCAmelCase , __UpperCAmelCase )
_a = nn.SiLU()
_a = nn.Linear(__UpperCAmelCase , 6 * embedding_dim , bias=__UpperCAmelCase )
_a = nn.LayerNorm(__UpperCAmelCase , elementwise_affine=__UpperCAmelCase , eps=1e-6 )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None ) -> Optional[Any]:
_a = self.linear(self.silu(self.emb(__UpperCAmelCase , __UpperCAmelCase , hidden_dtype=__UpperCAmelCase ) ) )
_a , _a , _a , _a , _a , _a = emb.chunk(6 , dim=1 )
_a = self.norm(__UpperCAmelCase ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class __lowerCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = 1e-5 ) -> Any:
super().__init__()
_a = num_groups
_a = eps
if act_fn is None:
_a = None
else:
_a = get_activation(__UpperCAmelCase )
_a = nn.Linear(__UpperCAmelCase , out_dim * 2 )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> int:
if self.act:
_a = self.act(__UpperCAmelCase )
_a = self.linear(__UpperCAmelCase )
_a = emb[:, :, None, None]
_a , _a = emb.chunk(2 , dim=1 )
_a = F.group_norm(__UpperCAmelCase , self.num_groups , eps=self.eps )
_a = x * (1 + scale) + shift
return x | 719 |
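One detail worth highlighting in the transformer block above is feed-forward chunking: because the MLP acts on each token independently, the sequence axis can be split into chunks and processed piecewise with identical results at lower peak activation memory. A self-contained sketch (toy sizes; `chunked_ff` is an illustrative helper, not the diffusers API):

import torch

def chunked_ff(ff, hidden_states: torch.Tensor, chunk_size: int, dim: int = 1) -> torch.Tensor:
    # Split the token axis, run the feed-forward per chunk, and concatenate.
    if hidden_states.shape[dim] % chunk_size != 0:
        raise ValueError("sequence length must be divisible by chunk_size")
    num_chunks = hidden_states.shape[dim] // chunk_size
    return torch.cat([ff(chunk) for chunk in hidden_states.chunk(num_chunks, dim=dim)], dim=dim)

mlp = torch.nn.Linear(8, 8)
x = torch.randn(2, 6, 8)
assert torch.allclose(chunked_ff(mlp, x, chunk_size=2), mlp(x), atol=1e-6)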
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__snake_case = {
'''configuration_pix2struct''': [
'''PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Pix2StructConfig''',
'''Pix2StructTextConfig''',
'''Pix2StructVisionConfig''',
],
'''processing_pix2struct''': ['''Pix2StructProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''Pix2StructImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Pix2StructPreTrainedModel''',
'''Pix2StructForConditionalGeneration''',
'''Pix2StructVisionModel''',
'''Pix2StructTextModel''',
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 285 | 0 |
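The `_LazyModule` wiring above defers heavy imports until an attribute is first accessed. A minimal sketch of the same idea using a module-level `__getattr__` (PEP 562); this illustrates the pattern only and is not the transformers implementation, and the symbol mapping below is a toy:

import importlib

_lazy_symbols = {"sqrt": "math", "dataclass": "dataclasses"}  # symbol -> providing module (toy mapping)

def __getattr__(name):
    # Resolve the symbol on first access, importing its module lazily.
    if name in _lazy_symbols:
        return getattr(importlib.import_module(_lazy_symbols[name]), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")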
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Tuple = {
"facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
"facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class _snake_case ( lowercase_ ):
lowerCAmelCase_ : Union[str, Any] = "xlm-roberta-xl"
def __init__( self , a__=250_880 , a__=2_560 , a__=36 , a__=32 , a__=10_240 , a__="gelu" , a__=0.1 , a__=0.1 , a__=514 , a__=1 , a__=0.0_2 , a__=1e-05 , a__=1 , a__=0 , a__=2 , a__="absolute" , a__=True , a__=None , **a__ , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , **a__ )
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = hidden_act
snake_case_ = intermediate_size
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = position_embedding_type
snake_case_ = use_cache
snake_case_ = classifier_dropout
class _snake_case ( lowercase_ ):
@property
def lowerCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
snake_case_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
snake_case_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
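For context, the mapping returned above tells the ONNX exporter which tensor dimensions remain symbolic at export time. A toy illustration of how such a `dynamic_axes` dict is consumed (the model below is a stand-in, not XLM-RoBERTa-XL):

import torch

class Toy(torch.nn.Module):
    def forward(self, input_ids, attention_mask):
        return input_ids * attention_mask  # placeholder computation

dummy = (torch.ones(1, 8, dtype=torch.long), torch.ones(1, 8, dtype=torch.long))
torch.onnx.export(
    Toy(), dummy, "toy.onnx",
    input_names=["input_ids", "attention_mask"],
    dynamic_axes={
        "input_ids": {0: "batch", 1: "sequence"},
        "attention_mask": {0: "batch", 1: "sequence"},
    },
)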
| 400 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
_SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Dict = {
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/config.json",
# See all BART models at https://huggingface.co/models?filter=bart
}
class _snake_case ( lowercase_ ):
lowerCAmelCase_ : Dict = "bart"
lowerCAmelCase_ : Optional[Any] = ["past_key_values"]
lowerCAmelCase_ : Tuple = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , a__=50_265 , a__=1_024 , a__=12 , a__=4_096 , a__=16 , a__=12 , a__=4_096 , a__=16 , a__=0.0 , a__=0.0 , a__="gelu" , a__=1_024 , a__=0.1 , a__=0.0 , a__=0.0 , a__=0.0_2 , a__=0.0 , a__=False , a__=True , a__=3 , a__=1 , a__=0 , a__=2 , a__=True , a__=2 , a__=2 , **a__ , ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ = vocab_size
snake_case_ = max_position_embeddings
snake_case_ = d_model
snake_case_ = encoder_ffn_dim
snake_case_ = encoder_layers
snake_case_ = encoder_attention_heads
snake_case_ = decoder_ffn_dim
snake_case_ = decoder_layers
snake_case_ = decoder_attention_heads
snake_case_ = dropout
snake_case_ = attention_dropout
snake_case_ = activation_dropout
snake_case_ = activation_function
snake_case_ = init_std
snake_case_ = encoder_layerdrop
snake_case_ = decoder_layerdrop
snake_case_ = classifier_dropout
snake_case_ = use_cache
snake_case_ = encoder_layers
snake_case_ = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=a__ , pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , is_encoder_decoder=a__ , decoder_start_token_id=a__ , forced_eos_token_id=a__ , **a__ , )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated" , a__ ):
snake_case_ = self.bos_token_id
warnings.warn(
F'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
"The config can simply be saved and uploaded again to be fixed." )
class _snake_case ( lowercase_ ):
@property
def lowerCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
snake_case_ = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
snake_case_ = {0: "batch"}
snake_case_ = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
snake_case_ = {0: "batch", 1: "decoder_sequence"}
snake_case_ = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(a__ , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
snake_case_ = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
snake_case_ , snake_case_ = self.num_layers
for i in range(a__ ):
snake_case_ = {0: "batch", 2: "past_sequence + sequence"}
snake_case_ = {0: "batch", 2: "past_sequence + sequence"}
else:
snake_case_ = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def lowerCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
snake_case_ = super().outputs
else:
snake_case_ = super(a__ , self ).outputs
if self.use_past:
snake_case_ , snake_case_ = self.num_layers
for i in range(a__ ):
snake_case_ = {0: "batch", 2: "past_sequence + sequence"}
snake_case_ = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def lowerCAmelCase__ ( self , a__ , a__ = -1 , a__ = -1 , a__ = False , a__ = None , ) -> Mapping[str, Any]:
'''simple docstring'''
snake_case_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
a__ , a__ , a__ , a__ , a__ )
# Generate decoder inputs
snake_case_ = seq_length if not self.use_past else 1
snake_case_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
a__ , a__ , a__ , a__ , a__ )
snake_case_ = {F'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
snake_case_ = dict(**a__ , **a__ )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
snake_case_ , snake_case_ = common_inputs["input_ids"].shape
snake_case_ = common_inputs["decoder_input_ids"].shape[1]
snake_case_ , snake_case_ = self.num_attention_heads
snake_case_ = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
snake_case_ = decoder_seq_length + 3
snake_case_ = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
snake_case_ = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(a__ , a__ )] , dim=1 )
snake_case_ = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
snake_case_ , snake_case_ = self.num_layers
snake_case_ = min(a__ , a__ )
snake_case_ = max(a__ , a__ ) - min_num_layers
snake_case_ = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(a__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(a__ ),
torch.zeros(a__ ),
torch.zeros(a__ ),
torch.zeros(a__ ),
) )
# TODO: test this.
snake_case_ = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(a__ , a__ ):
common_inputs["past_key_values"].append((torch.zeros(a__ ), torch.zeros(a__ )) )
return common_inputs
def lowerCAmelCase__ ( self , a__ , a__ = -1 , a__ = -1 , a__ = False , a__ = None , ) -> Mapping[str, Any]:
'''simple docstring'''
snake_case_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
a__ , a__ , a__ , a__ , a__ )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
snake_case_ , snake_case_ = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
snake_case_ = seqlen + 2
snake_case_ , snake_case_ = self.num_layers
snake_case_ , snake_case_ = self.num_attention_heads
snake_case_ = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
snake_case_ = common_inputs["attention_mask"].dtype
snake_case_ = torch.cat(
[common_inputs["attention_mask"], torch.ones(a__ , a__ , dtype=a__ )] , dim=1 )
snake_case_ = [
(torch.zeros(a__ ), torch.zeros(a__ )) for _ in range(a__ )
]
return common_inputs
def lowerCAmelCase__ ( self , a__ , a__ = -1 , a__ = -1 , a__ = False , a__ = None , ) -> Mapping[str, Any]:
'''simple docstring'''
snake_case_ = compute_effective_axis_dimension(
a__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
snake_case_ = tokenizer.num_special_tokens_to_add(a__ )
snake_case_ = compute_effective_axis_dimension(
a__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=a__ )
# Generate dummy inputs according to compute batch and sequence
snake_case_ = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
snake_case_ = dict(tokenizer(a__ , return_tensors=a__ ) )
return common_inputs
def lowerCAmelCase__ ( self , a__ , a__ = -1 , a__ = -1 , a__ = False , a__ = None , ) -> Mapping[str, Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
snake_case_ = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
a__ , batch_size=a__ , seq_length=a__ , is_pair=a__ , framework=a__ )
elif self.task == "causal-lm":
snake_case_ = self._generate_dummy_inputs_for_causal_lm(
a__ , batch_size=a__ , seq_length=a__ , is_pair=a__ , framework=a__ )
else:
snake_case_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
a__ , batch_size=a__ , seq_length=a__ , is_pair=a__ , framework=a__ )
return common_inputs
def lowerCAmelCase__ ( self , a__ , a__ , a__ , a__ ) -> str:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
snake_case_ = super()._flatten_past_key_values_(a__ , a__ , a__ , a__ )
else:
snake_case_ = super(a__ , self )._flatten_past_key_values_(
a__ , a__ , a__ , a__ )
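Most of the dummy-input code above is shape bookkeeping for the decoder cache. The invariant it maintains, condensed into a standalone snippet (sizes are illustrative):

import torch

batch, num_heads, past_len, head_dim, num_layers = 2, 4, 5, 16, 3
# One (key, value) pair per decoder layer, each shaped
# (batch, num_heads, past_seq_len, head_dim).
past_key_values = [
    (torch.zeros(batch, num_heads, past_len, head_dim),
     torch.zeros(batch, num_heads, past_len, head_dim))
    for _ in range(num_layers)
]
# The attention mask must cover past plus current tokens.
seq_len = 1
attention_mask = torch.ones(batch, past_len + seq_len)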
| 400 | 1 |
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Find a root of `func` (an expression in x) by Newton-Raphson, starting from a."""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
# Find root of polynomial
print(F'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}''')
# Find value of e (the root of log(x) - 1 = 0)
print(F'''The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}''')
# Exponential Roots
print(F'''The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}''') | 718 |
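Because the string-plus-`eval` approach above is fragile, here is an equivalent `eval`-free variant for comparison, taking the function and its derivative as callables (a sketch added for illustration, not part of the original module):

from math import cos, sin

def newton_raphson_callable(f, f_prime, x0: float, precision: float = 10**-10) -> float:
    # Same iteration as above, with f and f' passed directly as callables.
    x = x0
    while abs(f(x)) >= precision:
        x -= f(x) / f_prime(x)
    return x

print(newton_raphson_callable(sin, cos, 2))  # ~3.141592653589793, i.e. pi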
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ):
snake_case_ = ShapEImgaImgPipeline
snake_case_ = ["""image"""]
snake_case_ = ["""image"""]
snake_case_ = [
"""num_images_per_prompt""",
"""num_inference_steps""",
"""generator""",
"""latents""",
"""guidance_scale""",
"""frame_size""",
"""output_type""",
"""return_dict""",
]
snake_case_ = False
@property
def __magic_name__ ( self : List[Any] ) -> List[Any]:
return 32
@property
def __magic_name__ ( self : List[str] ) -> Optional[int]:
return 32
@property
def __magic_name__ ( self : Optional[int] ) -> Optional[Any]:
return self.time_input_dim * 4
@property
def __magic_name__ ( self : Dict ) -> Union[str, Any]:
return 8
@property
def __magic_name__ ( self : Optional[int] ) -> Union[str, Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Dict =CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
SCREAMING_SNAKE_CASE__ : str =CLIPVisionModel(__lowercase )
return model
@property
def __magic_name__ ( self : Tuple ) -> List[str]:
SCREAMING_SNAKE_CASE__ : int =CLIPImageProcessor(
crop_size=2_24 , do_center_crop=__lowercase , do_normalize=__lowercase , do_resize=__lowercase , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=2_24 , )
return image_processor
@property
def __magic_name__ ( self : List[str] ) -> Dict:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : str ={
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
SCREAMING_SNAKE_CASE__ : str =PriorTransformer(**__lowercase )
return model
@property
def __magic_name__ ( self : Tuple ) -> List[str]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] ={
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
SCREAMING_SNAKE_CASE__ : str =ShapERenderer(**__lowercase )
return model
def __magic_name__ ( self : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : int =self.dummy_prior
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.dummy_image_encoder
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.dummy_image_processor
SCREAMING_SNAKE_CASE__ : Tuple =self.dummy_renderer
SCREAMING_SNAKE_CASE__ : int =HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=10_24 , prediction_type='''sample''' , use_karras_sigmas=__lowercase , clip_sample=__lowercase , clip_sample_range=1.0 , )
SCREAMING_SNAKE_CASE__ : Any ={
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def __magic_name__ ( self : Any , __lowercase : List[str] , __lowercase : Any=0 ) -> Any:
SCREAMING_SNAKE_CASE__ : int =floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowercase ) ).to(__lowercase )
if str(__lowercase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE__ : List[str] =torch.manual_seed(__lowercase )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] =torch.Generator(device=__lowercase ).manual_seed(__lowercase )
SCREAMING_SNAKE_CASE__ : Any ={
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def __magic_name__ ( self : List[str] ) -> str:
SCREAMING_SNAKE_CASE__ : int ='''cpu'''
SCREAMING_SNAKE_CASE__ : Any =self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : str =self.pipeline_class(**__lowercase )
SCREAMING_SNAKE_CASE__ : Any =pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : Dict =pipe(**self.get_dummy_inputs(__lowercase ) )
SCREAMING_SNAKE_CASE__ : Tuple =output.images[0]
SCREAMING_SNAKE_CASE__ : List[Any] =image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
SCREAMING_SNAKE_CASE__ : List[Any] =np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __magic_name__ ( self : List[Any] ) -> List[str]:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __magic_name__ ( self : Optional[int] ) -> str:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =torch_device == '''cpu'''
SCREAMING_SNAKE_CASE__ : Optional[Any] =True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=__lowercase , relax_max_difference=__lowercase , )
def __magic_name__ ( self : Dict ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Any =self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Dict =self.pipeline_class(**__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =1
SCREAMING_SNAKE_CASE__ : List[str] =2
SCREAMING_SNAKE_CASE__ : Dict =self.get_dummy_inputs(__lowercase )
for key in inputs.keys():
if key in self.batch_params:
SCREAMING_SNAKE_CASE__ : Tuple =batch_size * [inputs[key]]
SCREAMING_SNAKE_CASE__ : List[Any] =pipe(**__lowercase , num_images_per_prompt=__lowercase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __magic_name__ ( self : Optional[Any] ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self : int ) -> Dict:
SCREAMING_SNAKE_CASE__ : List[str] =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
SCREAMING_SNAKE_CASE__ : Dict =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_img2img_out.npy''' )
SCREAMING_SNAKE_CASE__ : List[Any] =ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
SCREAMING_SNAKE_CASE__ : Tuple =pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : Tuple =torch.Generator(device=__lowercase ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Tuple =pipe(
__lowercase , generator=__lowercase , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(__lowercase , __lowercase ) | 665 | 0 |
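The final assertion relies on an image-similarity helper from the diffusers test utilities; conceptually it reduces to a mean-absolute pixel difference with a tolerance. An illustrative stand-in (not the library function, whose exact scaling may differ):

import numpy as np

def mean_pixel_difference_ok(image: np.ndarray, expected: np.ndarray, threshold: float = 10.0) -> bool:
    # Assumes both arrays are float images in [0, 1]; scale to 0-255 and compare means.
    diff = np.abs(image.astype(np.float32) - expected.astype(np.float32)) * 255.0
    return float(diff.mean()) < threshold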
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    # formula for the sum of an arithmetic series
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    return total


def main() -> None:
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
doctest.testmod()
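A quick sanity check of the closed form against a brute-force sum (illustrative):

assert sum_of_series(1, 1, 10) == sum(range(1, 11))  # 1 + 2 + ... + 10 = 55
assert sum_of_series(5, 0, 4) == 5 * 4               # constant series: n * first_term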
| 443 |
import os
def solution() -> int:
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
    names = names.replace('"', "").split(",")
    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
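Worked example from the Project Euler statement: "COLIN" has alphabetical value 3 + 15 + 12 + 9 + 14 = 53 and, sorted into position 938, contributes 938 x 53 = 49714:

assert sum(ord(letter) - 64 for letter in "COLIN") == 53
assert 938 * 53 == 49714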
| 443 | 1 |
'''simple docstring'''
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli()
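A usage sketch of `pack_examples` with a toy whitespace tokenizer, to make the packing behaviour concrete (`ToyTok` and the data below are illustrative; real runs go through `packer_cli` with an `AutoTokenizer`):

from types import SimpleNamespace

import torch

class ToyTok:
    # Counts whitespace-separated words as "tokens"; mimics the
    # `input_ids.shape[1]` interface that `is_too_big` relies on.
    def __call__(self, text, return_tensors="pt"):
        return SimpleNamespace(input_ids=torch.zeros(1, len(text.split())))

src = ["a b", "c d", "e f g h"]
tgt = ["1 2", "3 4", "5 6 7 8"]
packed_src, packed_tgt = pack_examples(ToyTok(), src, tgt, max_tokens=4)
print(packed_src)  # ['a b c d', 'e f g h'] under these toy assumptions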
| 701 |
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('0.12.2'):
raise Exception('requires fairseq >= 0.12.2')
if version.parse(fairseq.__version__) > version.parse('2'):
raise Exception('requires fairseq < v2')
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = 'Hello, World!'
__UpperCAmelCase = 'en_XX'
def SCREAMING_SNAKE_CASE_ ( snake_case_ : str , snake_case_ : str , snake_case_ : bool ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE : List[Any] = Path('data_bin' )
SCREAMING_SNAKE_CASE : List[Any] = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(snake_case_ ).parent ) , checkpoint_file=Path(snake_case_ ).name , _name='xmod_base' , arch='xmod_base' , task='multilingual_masked_lm' , data_name_or_path=str(snake_case_ ) , bpe='sentencepiece' , sentencepiece_model=str(Path(snake_case_ ).parent / 'sentencepiece.bpe.model' ) , src_dict=str(data_dir / 'dict.txt' ) , )
xmod.eval() # disable dropout
print(snake_case_ )
SCREAMING_SNAKE_CASE : Any = xmod.model.encoder.sentence_encoder
SCREAMING_SNAKE_CASE : Optional[int] = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , 'bottleneck' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
SCREAMING_SNAKE_CASE : List[Any] = xmod.model.classification_heads['mnli'].out_proj.weight.shape[0]
print('Our X-MOD config:' , snake_case_ )
SCREAMING_SNAKE_CASE : int = XmodForSequenceClassification(snake_case_ ) if classification_head else XmodForMaskedLM(snake_case_ )
model.eval()
# Now let's copy all the weights.
# Embeddings
SCREAMING_SNAKE_CASE : Tuple = xmod_sent_encoder.embed_tokens.weight
SCREAMING_SNAKE_CASE : str = xmod_sent_encoder.embed_positions.weight
SCREAMING_SNAKE_CASE : List[Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
SCREAMING_SNAKE_CASE : Optional[int] = xmod_sent_encoder.layernorm_embedding.weight
SCREAMING_SNAKE_CASE : Any = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
SCREAMING_SNAKE_CASE : List[str] = model.roberta.encoder.layer[i]
SCREAMING_SNAKE_CASE : List[Any] = xmod_sent_encoder.layers[i]
# self attention
SCREAMING_SNAKE_CASE : Dict = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('Dimensions of self-attention weights do not match.' )
SCREAMING_SNAKE_CASE : List[Any] = xmod_layer.self_attn.q_proj.weight
SCREAMING_SNAKE_CASE : Union[str, Any] = xmod_layer.self_attn.q_proj.bias
SCREAMING_SNAKE_CASE : List[Any] = xmod_layer.self_attn.k_proj.weight
SCREAMING_SNAKE_CASE : int = xmod_layer.self_attn.k_proj.bias
SCREAMING_SNAKE_CASE : int = xmod_layer.self_attn.v_proj.weight
SCREAMING_SNAKE_CASE : Optional[Any] = xmod_layer.self_attn.v_proj.bias
# self-attention output
SCREAMING_SNAKE_CASE : List[Any] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('Dimensions of self-attention output weights do not match.' )
SCREAMING_SNAKE_CASE : int = xmod_layer.self_attn.out_proj.weight
SCREAMING_SNAKE_CASE : Optional[int] = xmod_layer.self_attn.out_proj.bias
SCREAMING_SNAKE_CASE : Union[str, Any] = xmod_layer.self_attn_layer_norm.weight
SCREAMING_SNAKE_CASE : str = xmod_layer.self_attn_layer_norm.bias
# intermediate
SCREAMING_SNAKE_CASE : Union[str, Any] = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('Dimensions of intermediate weights do not match.' )
SCREAMING_SNAKE_CASE : int = xmod_layer.fca.weight
SCREAMING_SNAKE_CASE : List[str] = xmod_layer.fca.bias
# output
SCREAMING_SNAKE_CASE : Dict = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('Dimensions of feed-forward weights do not match.' )
SCREAMING_SNAKE_CASE : int = xmod_layer.fca.weight
SCREAMING_SNAKE_CASE : Optional[Any] = xmod_layer.fca.bias
SCREAMING_SNAKE_CASE : Tuple = xmod_layer.final_layer_norm.weight
SCREAMING_SNAKE_CASE : Dict = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
SCREAMING_SNAKE_CASE : Tuple = xmod_layer.adapter_layer_norm.weight
SCREAMING_SNAKE_CASE : str = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('Lists of language adapters do not match.' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
SCREAMING_SNAKE_CASE : int = bert_output.adapter_modules[lang_code]
SCREAMING_SNAKE_CASE : Optional[int] = xmod_layer.adapter_modules[lang_code]
SCREAMING_SNAKE_CASE : Optional[int] = from_adapter.fca.weight
SCREAMING_SNAKE_CASE : Optional[int] = from_adapter.fca.bias
SCREAMING_SNAKE_CASE : Optional[int] = from_adapter.fca.weight
SCREAMING_SNAKE_CASE : Any = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
SCREAMING_SNAKE_CASE : Any = xmod_sent_encoder.layer_norm.weight
SCREAMING_SNAKE_CASE : List[Any] = xmod_sent_encoder.layer_norm.bias
if classification_head:
SCREAMING_SNAKE_CASE : int = xmod.model.classification_heads['mnli'].dense.weight
SCREAMING_SNAKE_CASE : List[Any] = xmod.model.classification_heads['mnli'].dense.bias
SCREAMING_SNAKE_CASE : str = xmod.model.classification_heads['mnli'].out_proj.weight
SCREAMING_SNAKE_CASE : int = xmod.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
SCREAMING_SNAKE_CASE : Optional[Any] = xmod.model.encoder.lm_head.dense.weight
SCREAMING_SNAKE_CASE : Tuple = xmod.model.encoder.lm_head.dense.bias
SCREAMING_SNAKE_CASE : Optional[int] = xmod.model.encoder.lm_head.layer_norm.weight
SCREAMING_SNAKE_CASE : Optional[int] = xmod.model.encoder.lm_head.layer_norm.bias
SCREAMING_SNAKE_CASE : Optional[int] = xmod.model.encoder.lm_head.weight
SCREAMING_SNAKE_CASE : Tuple = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
SCREAMING_SNAKE_CASE : Union[str, Any] = xmod.encode(snake_case_ ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(snake_case_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(snake_case_ )[0]
if classification_head:
SCREAMING_SNAKE_CASE : List[str] = xmod.model.classification_heads['mnli'](xmod.extract_features(snake_case_ ) )
else:
SCREAMING_SNAKE_CASE : Any = xmod.model(snake_case_ , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
SCREAMING_SNAKE_CASE : List[str] = torch.max(torch.abs(our_output - their_output ) ).item()
print(f"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
SCREAMING_SNAKE_CASE : Dict = torch.allclose(snake_case_ , snake_case_ , atol=1e-3 )
print('Do both models output the same tensors?' , '🔥' if success else '💩' )
if not success:
raise Exception('Something went wRoNg' )
Path(snake_case_ ).mkdir(parents=snake_case_ , exist_ok=snake_case_ )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case_ )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xmod_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
__UpperCAmelCase = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
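The heart of the script's verification step, distilled into a reusable check (an illustrative helper added here, not part of the original script):

import torch

def outputs_match(ours: torch.Tensor, theirs: torch.Tensor, atol: float = 1e-3) -> bool:
    # Compare the converted model's output against the reference elementwise.
    max_abs_diff = torch.max(torch.abs(ours - theirs)).item()
    print(f"max_absolute_diff = {max_abs_diff}")  # ~1e-7 for a faithful port
    return torch.allclose(ours, theirs, atol=atol)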
| 220 | 0 |
'''simple docstring'''
def selection_sort(collection: list) -> list:
    """Sort in place by repeatedly selecting the least remaining element."""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(selection_sort(unsorted))
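Selection sort performs O(n^2) comparisons but at most n - 1 swaps; a few quick checks of the implementation above:

assert selection_sort([64, 25, 12, 22, 11]) == [11, 12, 22, 25, 64]
assert selection_sort([]) == []
assert selection_sort([-2, -5, -45]) == [-45, -5, -2]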
| 107 |
from collections import namedtuple

import requests
from lxml import html  # type: ignore

covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    """Scrape the three headline counters from the worldometers landing page."""
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
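Scraping a page by XPath is brittle; a slightly more defensive variant (illustrative) validates the number of matches before unpacking:

def covid_stats_checked(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    values = html.fromstring(requests.get(url).content).xpath(
        '//div[@class = "maincounter-number"]/span/text()'
    )
    if len(values) != 3:
        raise ValueError(f"expected 3 counters, found {len(values)}")
    return covid_data(*values)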
| 424 | 0 |
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class lowercase ( __lowerCamelCase ):
lowerCamelCase_ =['image_processor']
lowerCamelCase_ ='SamImageProcessor'
def __init__( self : Optional[int] , __lowerCAmelCase : str) -> List[str]:
super().__init__(__lowerCAmelCase)
lowercase_ = self.image_processor
lowercase_ = -10
lowercase_ = self.image_processor.size["longest_edge"]
def __call__( self : int , __lowerCAmelCase : Dict=None , __lowerCAmelCase : Any=None , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : int=None , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , **__lowerCAmelCase : List[Any] , ) -> BatchEncoding:
lowercase_ = self.image_processor(
__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase , )
# pop arguments that are not used in the foward but used nevertheless
lowercase_ = encoding_image_processor["original_sizes"]
if hasattr(__lowerCAmelCase , "numpy"): # Checks if Torch or TF tensor
lowercase_ = original_sizes.numpy()
lowercase_ , lowercase_ , lowercase_ = self._check_and_preprocess_points(
input_points=__lowerCAmelCase , input_labels=__lowerCAmelCase , input_boxes=__lowerCAmelCase , )
lowercase_ = self._normalize_and_convert(
__lowerCAmelCase , __lowerCAmelCase , input_points=__lowerCAmelCase , input_labels=__lowerCAmelCase , input_boxes=__lowerCAmelCase , return_tensors=__lowerCAmelCase , )
return encoding_image_processor
def __UpperCAmelCase ( self : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : List[str]="pt" , ) -> str:
if input_points is not None:
if len(__lowerCAmelCase) != len(__lowerCAmelCase):
lowercase_ = [
self._normalize_coordinates(self.target_size , __lowerCAmelCase , original_sizes[0]) for point in input_points
]
else:
lowercase_ = [
self._normalize_coordinates(self.target_size , __lowerCAmelCase , __lowerCAmelCase)
for point, original_size in zip(__lowerCAmelCase , __lowerCAmelCase)
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points):
if input_labels is not None:
lowercase_ , lowercase_ = self._pad_points_and_labels(__lowerCAmelCase , __lowerCAmelCase)
lowercase_ = np.array(__lowerCAmelCase)
if input_labels is not None:
lowercase_ = np.array(__lowerCAmelCase)
if input_boxes is not None:
if len(__lowerCAmelCase) != len(__lowerCAmelCase):
lowercase_ = [
self._normalize_coordinates(self.target_size , __lowerCAmelCase , original_sizes[0] , is_bounding_box=__lowerCAmelCase)
for box in input_boxes
]
else:
lowercase_ = [
self._normalize_coordinates(self.target_size , __lowerCAmelCase , __lowerCAmelCase , is_bounding_box=__lowerCAmelCase)
for box, original_size in zip(__lowerCAmelCase , __lowerCAmelCase)
]
lowercase_ = np.array(__lowerCAmelCase)
if input_boxes is not None:
if return_tensors == "pt":
lowercase_ = torch.from_numpy(__lowerCAmelCase)
# boxes batch size of 1 by default
lowercase_ = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
elif return_tensors == "tf":
lowercase_ = tf.convert_to_tensor(__lowerCAmelCase)
# boxes batch size of 1 by default
lowercase_ = tf.expand_dims(__lowerCAmelCase , 1) if len(input_boxes.shape) != 3 else input_boxes
encoding_image_processor.update({"input_boxes": input_boxes})
if input_points is not None:
if return_tensors == "pt":
lowercase_ = torch.from_numpy(__lowerCAmelCase)
# point batch size of 1 by default
lowercase_ = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
elif return_tensors == "tf":
lowercase_ = tf.convert_to_tensor(__lowerCAmelCase)
# point batch size of 1 by default
lowercase_ = tf.expand_dims(__lowerCAmelCase , 1) if len(input_points.shape) != 4 else input_points
encoding_image_processor.update({"input_points": input_points})
if input_labels is not None:
if return_tensors == "pt":
lowercase_ = torch.from_numpy(__lowerCAmelCase)
# point batch size of 1 by default
lowercase_ = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
elif return_tensors == "tf":
lowercase_ = tf.convert_to_tensor(__lowerCAmelCase)
# point batch size of 1 by default
lowercase_ = tf.expand_dims(__lowerCAmelCase , 1) if len(input_labels.shape) != 3 else input_labels
encoding_image_processor.update({"input_labels": input_labels})
return encoding_image_processor
def __UpperCAmelCase ( self : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any]) -> Dict:
lowercase_ = max([point.shape[0] for point in input_points])
lowercase_ = []
for i, point in enumerate(__lowerCAmelCase):
if point.shape[0] != expected_nb_points:
lowercase_ = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value] , axis=0)
lowercase_ = np.append(input_labels[i] , [self.point_pad_value])
processed_input_points.append(__lowerCAmelCase)
lowercase_ = processed_input_points
return input_points, input_labels
    def _normalize_coordinates(self, target_size, coords, original_size, is_bounding_box=False) -> np.ndarray:
        old_h, old_w = original_size
        new_h, new_w = self.image_processor._get_preprocess_shape(original_size, longest_edge=target_size)
        coords = deepcopy(coords).astype(float)

        if is_bounding_box:
            coords = coords.reshape(-1, 2, 2)

        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)

        if is_bounding_box:
            coords = coords.reshape(-1, 4)

        return coords
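    # Validate user-supplied prompts: accept torch/TF tensors or nested lists and
    # normalize everything to lists of numpy arrays before coordinate rescaling.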
    def _check_and_preprocess_points(self, input_points=None, input_labels=None, input_boxes=None):
        if input_points is not None:
            if hasattr(input_points, "numpy"):  # Checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()

            if not isinstance(input_points, list) or not isinstance(input_points[0], list):
                raise ValueError("Input points must be a list of list of floating points.")
            input_points = [np.array(input_point) for input_point in input_points]
        else:
            input_points = None

        if input_labels is not None:
            if hasattr(input_labels, "numpy"):
                input_labels = input_labels.numpy().tolist()

            if not isinstance(input_labels, list) or not isinstance(input_labels[0], list):
                raise ValueError("Input labels must be a list of list integers.")
            input_labels = [np.array(label) for label in input_labels]
        else:
            input_labels = None

        if input_boxes is not None:
            if hasattr(input_boxes, "numpy"):
                input_boxes = input_boxes.numpy().tolist()

            if (
                not isinstance(input_boxes, list)
                or not isinstance(input_boxes[0], list)
                or not isinstance(input_boxes[0][0], list)
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points.")
            input_boxes = [np.array(box).astype(np.float32) for box in input_boxes]
        else:
            input_boxes = None

        return input_points, input_labels, input_boxes
@property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names))
    def post_process_masks(self, *args, **kwargs):
        return self.image_processor.post_process_masks(*args, **kwargs)
| 461 | '''simple docstring'''
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
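# Module-level test constants: the checkpoint wrapped in the auto-wrap test and the
# mixed-precision modes exercised against the FSDP plugin below.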
BERT_BASE_CASED = "bert-base-cased"
FP16 = "fp16"
BF16 = "bf16"
dtypes = [FP16, BF16]
@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase):
    def setUp(self):
        super().setUp()

        self.dist_env = dict(
ACCELERATE_USE_FSDP="true" , MASTER_ADDR="localhost" , MASTER_PORT="10999" , RANK="0" , LOCAL_RANK="0" , WORLD_SIZE="1" , )
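    # Each unit test mocks single-process distributed env vars and checks that
    # FullyShardedDataParallelPlugin picks up the corresponding FSDP_* setting.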
    def test_sharding_strategy(self):
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            env = self.dist_env.copy()
            env["FSDP_SHARDING_STRATEGY"] = f"{i + 1}"
            env["FSDP_SHARDING_STRATEGY_NAME"] = strategy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1))
    def test_backward_prefetch(self):
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):
            env = self.dist_env.copy()
            env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch)
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1))
    def test_state_dict_type(self):
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):
            env = self.dist_env.copy()
            env["FSDP_STATE_DICT_TYPE"] = state_dict_type
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1))
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only)
    def test_auto_wrap_policy(self):
        model = AutoModel.from_pretrained(BERT_BASE_CASED)
        for policy in FSDP_AUTO_WRAP_POLICY:
            env = self.dist_env.copy()
            env["FSDP_AUTO_WRAP_POLICY"] = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "BertLayer"
            elif policy == "SIZE_BASED_WRAP":
                env["FSDP_MIN_NUM_PARAMS"] = "2000"
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model)
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy)
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy)
        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "TRANSFORMER_BASED_WRAP"
        env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "T5Layer"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            with self.assertRaises(Exception) as cm:
                fsdp_plugin.set_auto_wrap_policy(model)
self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception))
        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "SIZE_BASED_WRAP"
        env["FSDP_MIN_NUM_PARAMS"] = "0"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(model)
self.assertIsNone(fsdp_plugin.auto_wrap_policy)
    def test_mixed_precision(self):
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
        for mp_dtype in dtypes:
            env = self.dist_env.copy()
            env["ACCELERATE_MIXED_PRECISION"] = mp_dtype
            with mockenv_context(**env):
                accelerator = Accelerator()
                if mp_dtype == "fp16":
                    dtype = torch.float16
                elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
                mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, mp_policy)
                if mp_dtype == FP16:
                    self.assertTrue(isinstance(accelerator.scaler, ShardedGradScaler))
                elif mp_dtype == BF16:
                    self.assertIsNone(accelerator.scaler)
                AcceleratorState._reset_state(True)
    def test_cpu_offload(self):
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
        for flag in [True, False]:
            env = self.dist_env.copy()
            env["FSDP_OFFLOAD_PARAMS"] = str(flag).lower()
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload, CPUOffload(offload_params=flag))
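# Slow multi-GPU integration tests: each one shells out to `accelerate launch` against a
# real training script and asserts on accuracy, checkpoint resume, or peak memory usage.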
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase):
    def setUp(self):
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        self.peak_memory_usage_upper_bound = {
            "multi_gpu_fp16": 3200,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000,
            "fsdp_full_shard_transformer_based_wrap_fp16": 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160

        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"])
    def test_performance(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_performance.py")
        cmd = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
for config in self.performance_configs:
            cmd_config = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
if strategy.lower() in config:
cmd_config.append(F'--fsdp_sharding_strategy={i+1}')
break
if "fp32" in config:
cmd_config.append("--mixed_precision=no")
else:
cmd_config.append("--mixed_precision=fp16")
if "cpu_offload" in config:
cmd_config.append("--fsdp_offload_params=True")
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(F'--fsdp_auto_wrap_policy={policy}')
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("--fsdp_min_num_params=2000")
cmd_config.extend(
[
self.test_file_path,
F'--output_dir={self.tmpdir}',
F'--performance_lower_bound={self.performance_lower_bound}',
])
with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
    def test_checkpointing(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_checkpointing.py")
        cmd = [
"accelerate",
"launch",
"--num_processes=2",
"--num_machines=1",
"--machine_rank=0",
"--use_fsdp",
"--mixed_precision=fp16",
"--fsdp_transformer_layer_cls_to_wrap=BertLayer",
]
        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            cmd_config = cmd.copy()
cmd_config.append(F'--fsdp_sharding_strategy={i+1}')
if strategy != "FULL_SHARD":
continue
            state_dict_config_index = len(cmd_config)
for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
cmd_config.append(F'--fsdp_state_dict_type={state_dict_type}')
cmd_config.extend(
[
self.test_file_path,
F'--output_dir={self.tmpdir}',
"--partial_train_epoch=1",
])
with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir, "epoch_0")
cmd_config.extend(
[
F'--resume_from_checkpoint={resume_from_checkpoint}',
])
with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())
    def test_peak_memory_usage(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py")
        cmd = [
"accelerate",
"launch",
"--num_processes=2",
"--num_machines=1",
"--machine_rank=0",
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
if "fp16" in spec:
cmd_config.extend(["--mixed_precision=fp16"])
else:
cmd_config.extend(["--mixed_precision=no"])
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(["--use_fsdp"])
                for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
if strategy.lower() in spec:
cmd_config.append(F'--fsdp_sharding_strategy={i+1}')
break
if "cpu_offload" in spec:
cmd_config.append("--fsdp_offload_params=True")
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(F'--fsdp_auto_wrap_policy={policy}')
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("--fsdp_min_num_params=2000")
cmd_config.extend(
[
self.test_file_path,
F'--output_dir={self.tmpdir}',
F'--peak_memory_upper_bound={peak_mem_upper_bound}',
F'--n_train={self.n_train}',
F'--n_val={self.n_val}',
])
with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
| 461 | 1 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2

LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)
def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
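# Flip each image with OpenCV and mirror the YOLO-format box centers
# (the x center for horizontal flips, the y center for vertical ones) as 1 - center.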
def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
def random_chars(number_char: int = 32) -> str:
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print('''DONE ✅''')
| 1 |
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
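# PABEE ("Patience-based Early Exit"): every BERT layer gets its own output head, and at
# inference the forward pass stops once `patience` consecutive layer predictions agree,
# trading a little accuracy for fewer layers executed.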
logger = logging.getLogger(__name__)


class BertEncoderWithPabee(BertEncoder):
"""simple docstring"""
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])
        hidden_states = layer_outputs[0]
        return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
"""simple docstring"""
    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)
        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.regression_threshold = 0
    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output
        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break

            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res
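# Classification head: one linear classifier per hidden layer; during training the
# per-layer losses are weighted by depth so later (stronger) classifiers dominate.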
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
"""simple docstring"""
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
| 540 | 0 |
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
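# AlbertModelTester builds a tiny random config/batch and checks output shapes for every
# task head; the mixin classes below run the shared HF test suite over those models.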
class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': AlbertModel,
'fill-mask': AlbertForMaskedLM,
'question-answering': AlbertForQuestionAnswering,
'text-classification': AlbertForSequenceClassification,
'token-classification': AlbertForTokenClassification,
'zero-shot': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
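# Integration check: run the real albert-base-v2 checkpoint on a fixed input and compare
# a 3x3 slice of the last hidden state against reference values.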
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 714 | """simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}
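# Fast (Rust-backed) BART tokenizer: byte-level BPE identical to RoBERTa's, so the class
# mostly wires `add_prefix_space`/`trim_offsets` into the backend tokenizer's components.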
class BartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 197 | 0 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
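# Latent-diffusion super-resolution: the low-resolution image is concatenated with the
# noisy latents along the channel dimension at every denoising step, and the VQ-VAE
# decodes the final latents back to pixel space.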
class LDMSuperResolutionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vqvae: VQModel,
        unet: UNet2DModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)
@torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 375 |
"""simple docstring"""
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
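# The tests below round-trip tensors through accelerate's disk-offload format:
# an index.json describing shapes/dtypes plus one raw .dat file per tensor.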
class OffloadTester(unittest.TestCase):
    def test_offload_state_dict(self):
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, model.state_dict())
            index_file = os.path.join(tmp_dir, "index.json")
            self.assertTrue(os.path.isfile(index_file))
            # TODO: add tests on what is inside the index

            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir, f"{key}.dat")
                self.assertTrue(os.path.isfile(weight_file))
                # TODO: add tests on the fact weights are properly loaded
    def test_offload_weight(self):
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2, 3, dtype=dtype)
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight, "weight", tmp_dir, {})
                weight_file = os.path.join(tmp_dir, "weight.dat")
                self.assertTrue(os.path.isfile(weight_file))
                self.assertDictEqual(index, {"weight": {"shape": [2, 3], "dtype": str(dtype).split(".")[1]}})

                new_weight = load_offloaded_weight(weight_file, index["weight"])
                self.assertTrue(torch.equal(weight, new_weight))
    def test_offload_weights_loader(self):
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if "linear2" not in k}
        disk_part = {k: v for k, v in state_dict.items() if "linear2" in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        cpu_part = {k: v for k, v in state_dict.items() if "weight" in k}
        disk_part = {k: v for k, v in state_dict.items() if "weight" not in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, state_dict)
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))
    def test_extract_submodules_state_dict(self):
        state_dict = {"a.1": 0, "a.10": 1, "a.2": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1": 0, "a.2": 2})

        state_dict = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1.a": 0, "a.2.a": 2})
| 299 | 0 |
def average_absolute_deviation(nums: list) -> float:
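    """
    Return the mean absolute deviation of a list of numbers.

    >>> average_absolute_deviation([0])
    0.0
    >>> average_absolute_deviation([4, 1, 3, 2])
    1.0
    >>> average_absolute_deviation([2, 70, 6, 50, 20, 8, 4, 0])
    20.0
    """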
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 718 |
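# Russian-peasant (binary) multiplication: walk the bits of b, doubling a at each step
# and adding the current a whenever the low bit of b is set; O(log b) additions total.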
def multiply(a: int, b: int) -> int:
    res = 0
    while b > 0:
        if b & 1:
            res += a

        a += a
        b >>= 1

    return res
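# Same doubling scheme with a modulus: each partial sum is reduced mod c at every step,
# so the running result stays below c no matter how large a * b gets.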
def multiply_mod(a: int, b: int, c: int) -> int:
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c

        a += a
        b >>= 1

    return res
| 23 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
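# Model classes are declared lazily: torch is only required when a modeling symbol is
# actually accessed, and _LazyModule resolves names from _import_structure on demand.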
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
"MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MraForMaskedLM",
"MraForMultipleChoice",
"MraForQuestionAnswering",
"MraForSequenceClassification",
"MraForTokenClassification",
"MraLayer",
"MraModel",
"MraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 232 | """simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
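# ByT5 tokenizes raw UTF-8 bytes: ids 0-2 are reserved for pad/eos/unk, each byte maps to
# its ordinal shifted by the special-token count, and the extra_id sentinels sit at the
# top of the vocabulary.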
class ByT5Tokenizer(PreTrainedTokenizer):
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=125,
        additional_special_tokens=None,
        **kwargs,
    ) -> None:
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens"
                )

        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits

        # define special tokens dict
        self.special_tokens_encoder = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder = {v: k for k, v in self.special_tokens_encoder.items()}
    @property
    def vocab_size(self):
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1
    def _tokenize(self, text: str) -> List[str]:
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens
    def _convert_token_to_id(self, token):
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id
    def _convert_id_to_token(self, index):
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token
    def convert_tokens_to_string(self, tokens):
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        return ()
| 232 | 1 |
import functools
def min_distance_up_bottom(word1: str, word2: str) -> int:
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index overflows - delete all remaining chars of the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index overflows - delete all remaining chars of the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
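# Example: min_distance_up_bottom("intention", "execution") == 5, the classic
# Levenshtein edit-distance instance (delete/insert/replace, one unit each).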
if __name__ == "__main__":
import doctest
doctest.testmod()
| 279 |
import math
class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
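# Demo: build a 5-node weighted digraph, run all-pairs shortest paths, then query
# two of the computed distances.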
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 279 | 1 |