"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
__SCREAMING_SNAKE_CASE = {
"""vocab_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json""",
},
}
__SCREAMING_SNAKE_CASE = {
"""xlnet-base-cased""": None,
"""xlnet-large-cased""": None,
}
__SCREAMING_SNAKE_CASE = """▁"""
# Segments (not really needed)
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = 2
__SCREAMING_SNAKE_CASE = 3
__SCREAMING_SNAKE_CASE = 4
class __snake_case ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowerCAmelCase_ : List[Any] = VOCAB_FILES_NAMES
lowerCAmelCase_ : int = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ : List[Any] = 'left'
lowerCAmelCase_ : List[str] = XLNetTokenizer
def __init__( self :Union[str, Any] , UpperCamelCase__ :Optional[Any]=None , UpperCamelCase__ :Optional[int]=None , UpperCamelCase__ :Union[str, Any]=False , UpperCamelCase__ :int=True , UpperCamelCase__ :Union[str, Any]=False , UpperCamelCase__ :int="<s>" , UpperCamelCase__ :Any="</s>" , UpperCamelCase__ :List[Any]="<unk>" , UpperCamelCase__ :List[str]="<sep>" , UpperCamelCase__ :Optional[int]="<pad>" , UpperCamelCase__ :Dict="<cls>" , UpperCamelCase__ :str="<mask>" , UpperCamelCase__ :Tuple=["<eop>", "<eod>"] , **UpperCamelCase__ :Optional[int] , ):
# Mask token behave like a normal word, i.e. include the space before it
_a = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
super().__init__(
vocab_file=UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , do_lower_case=UpperCamelCase__ , remove_space=UpperCamelCase__ , keep_accents=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , **UpperCamelCase__ , )
_a = 3
_a = do_lower_case
_a = remove_space
_a = keep_accents
_a = vocab_file
_a = False if not self.vocab_file else True
def SCREAMING_SNAKE_CASE_ ( self :Optional[int] , UpperCamelCase__ :List[int] , UpperCamelCase__ :Optional[List[int]] = None ):
_a = [self.sep_token_id]
_a = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def SCREAMING_SNAKE_CASE_ ( self :Any , UpperCamelCase__ :List[int] , UpperCamelCase__ :Optional[List[int]] = None ):
_a = [self.sep_token_id]
_a = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def SCREAMING_SNAKE_CASE_ ( self :Any , UpperCamelCase__ :str , UpperCamelCase__ :Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(UpperCamelCase__ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_a = os.path.join(
UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ):
copyfile(self.vocab_file , UpperCamelCase__ )
return (out_vocab_file,)
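# Usage sketch for the class above (illustrative only, not part of the original
# module; assumes the stock "xlnet-base-cased" checkpoint is available):
#
#   tokenizer = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
#   ids = tokenizer.build_inputs_with_special_tokens([10, 11], [12, 13])
#   # -> [10, 11, <sep>, 12, 13, <sep>, <cls>]: XLNet appends <sep>/<cls> at the
#   # end, and create_token_type_ids_from_sequences yields [0, 0, 0, 1, 1, 1, 2].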
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
__SCREAMING_SNAKE_CASE = False
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self :Optional[int] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def SCREAMING_SNAKE_CASE_ ( self :List[str] ):
return 12
@property
def SCREAMING_SNAKE_CASE_ ( self :int ):
return 12
@property
def SCREAMING_SNAKE_CASE_ ( self :Dict ):
return 32
@property
def SCREAMING_SNAKE_CASE_ ( self :Optional[int] ):
torch.manual_seed(0 )
_a = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def SCREAMING_SNAKE_CASE_ ( self :Dict ):
_a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def SCREAMING_SNAKE_CASE_ ( self :Optional[int] ):
torch.manual_seed(0 )
_a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(UpperCamelCase__ )
@property
def SCREAMING_SNAKE_CASE_ ( self :str ):
torch.manual_seed(0 )
_a = 12
_a = 12
_a = {
"attention_bias": True,
"cross_attention_dim": 32,
"attention_head_dim": height * width,
"num_attention_heads": 1,
"num_vector_embeds": self.num_embed,
"num_embeds_ada_norm": self.num_embeds_ada_norm,
"norm_num_groups": 32,
"sample_size": width,
"activation_fn": "geglu-approximate",
}
_a = TransformeraDModel(**UpperCamelCase__ )
return model
def SCREAMING_SNAKE_CASE_ ( self :Optional[Any] ):
_a = "cpu"
_a = self.dummy_vqvae
_a = self.dummy_text_encoder
_a = self.dummy_tokenizer
_a = self.dummy_transformer
_a = VQDiffusionScheduler(self.num_embed )
_a = LearnedClassifierFreeSamplingEmbeddings(learnable=UpperCamelCase__ )
_a = VQDiffusionPipeline(
vqvae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , transformer=UpperCamelCase__ , scheduler=UpperCamelCase__ , learned_classifier_free_sampling_embeddings=UpperCamelCase__ , )
_a = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
_a = "teddy bear playing in the pool"
_a = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
_a = pipe([prompt] , generator=UpperCamelCase__ , num_inference_steps=2 , output_type="np" )
_a = output.images
_a = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
_a = pipe(
[prompt] , generator=UpperCamelCase__ , output_type="np" , return_dict=UpperCamelCase__ , num_inference_steps=2 )[0]
_a = image[0, -3:, -3:, -1]
_a = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
_a = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def SCREAMING_SNAKE_CASE_ ( self :Union[str, Any] ):
_a = "cpu"
_a = self.dummy_vqvae
_a = self.dummy_text_encoder
_a = self.dummy_tokenizer
_a = self.dummy_transformer
_a = VQDiffusionScheduler(self.num_embed )
_a = LearnedClassifierFreeSamplingEmbeddings(
learnable=UpperCamelCase__ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
_a = VQDiffusionPipeline(
vqvae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , transformer=UpperCamelCase__ , scheduler=UpperCamelCase__ , learned_classifier_free_sampling_embeddings=UpperCamelCase__ , )
_a = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
_a = "teddy bear playing in the pool"
_a = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
_a = pipe([prompt] , generator=UpperCamelCase__ , num_inference_steps=2 , output_type="np" )
_a = output.images
_a = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
_a = pipe(
[prompt] , generator=UpperCamelCase__ , output_type="np" , return_dict=UpperCamelCase__ , num_inference_steps=2 )[0]
_a = image[0, -3:, -3:, -1]
_a = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
_a = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self :Union[str, Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self :List[Any] ):
_a = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy" )
_a = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq" )
_a = pipeline.to(UpperCamelCase__ )
pipeline.set_progress_bar_config(disable=UpperCamelCase__ )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
_a = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
_a = pipeline(
"teddy bear playing in the pool" , num_images_per_prompt=1 , generator=UpperCamelCase__ , output_type="np" , )
_a = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}


class GLPNConfig(PretrainedConfig):
    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
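# Illustrative instantiation of the config above (values other than the documented
# defaults are hypothetical):
#
#   config = GLPNConfig()  # all defaults; config.model_type == "glpn"
#   small = GLPNConfig(hidden_sizes=[16, 32, 80, 128], decoder_hidden_size=32)
#   assert small.num_encoder_blocks == 4  # untouched default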
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
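# With the hooks above in place, the shared testing utilities expose the
# `--make-reports` option read by pytest_terminal_summary; a hedged example run
# (test selection is illustrative):
#
#   pytest tests/ -k "pipeline" --make-reports=run_reports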
from __future__ import annotations

RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]


# used alphabet --------------------------
# from string.ascii_uppercase
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
rotor2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
rotor3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
reflector = {
    "A": "N", "N": "A",
    "B": "O", "O": "B",
    "C": "P", "P": "C",
    "D": "Q", "Q": "D",
    "E": "R", "R": "E",
    "F": "S", "S": "F",
    "G": "T", "T": "G",
    "H": "U", "U": "H",
    "I": "V", "V": "I",
    "J": "W", "W": "J",
    "K": "X", "X": "K",
    "L": "Y", "Y": "L",
    "M": "Z", "Z": "M",
}
# -------------------------- extra rotors --------------------------
rotor4 = "RMDJXFUWGISLHVTCQNKYPBEZOA"
rotor5 = "SGLCPQWZHKXAREONTFBVIYJUDM"
rotor6 = "HVSICLTYKQUBXDWAJZOMFGPREN"
rotor7 = "RZWQHFMVDBKICJLNTUXAGYPSOE"
rotor8 = "LFKIJODBEGAMQPXVUHYSTCZRWN"
rotor9 = "KOAEGVDHXPQZMLFTYWJNBRCIUS"


def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f"Please use 3 unique rotors (not {unique_rotsel})"
        raise Exception(msg)

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        msg = f"First rotor position is not within range of 1..26 ({rotorpos1})"
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f"Second rotor position is not within range of 1..26 ({rotorpos2})"
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f"Third rotor position is not within range of 1..26 ({rotorpos3})"
        raise ValueError(msg)

    # Validates string and returns dict
    pbdict = _plugboard(pb)

    return rotpos, rotsel, pbdict


def _plugboard(pbstring: str) -> dict[str, str]:
    # tests the input string if it
    # a) is type string
    # b) has even length (so pairs can be made)
    if not isinstance(pbstring, str):
        msg = f"Plugboard setting isn't type string ({type(pbstring)})"
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring)})"
        raise Exception(msg)
    elif pbstring == "":
        return {}

    pbstring = pbstring.replace(" ", "")

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg)
        elif i in tmppbl:
            msg = f"Duplicate symbol ({i})"
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl

    # Created the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb


def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(rotor_position, rotor_selection, plugb.upper())

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]

            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]

            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
            rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
            rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

        # else:
        #    pass
        #    Error could be also raised
        #    raise ValueError(
        #        'Invalid symbol('+repr(symbol)+')')

        result.append(symbol)

    return "".join(result)


if __name__ == "__main__":
    message = "This is my Python script that emulates the Enigma machine from WWII."
    rotor_pos = (1, 1, 1)
    pb = "pictures"
    rotor_sel = (rotor2, rotor4, rotor8)

    en = enigma(message, rotor_pos, rotor_sel, pb)

    print("Encrypted message:", en)
    print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
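# The symmetric reflector makes each per-symbol mapping an involution, so running
# enigma() twice with identical settings restores the (upper-cased) input. A small
# sketch using the definitions above:
#
#   msg = "HELLOWORLD"
#   once = enigma(msg, (1, 1, 1), (rotor1, rotor2, rotor3), plugb="AB")
#   assert enigma(once, (1, 1, 1), (rotor1, rotor2, rotor3), plugb="AB") == msg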
import json
import sys
import tempfile
import unittest
from pathlib import Path

import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir


sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402


SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")


class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()

            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")

    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
import argparse
import os
import shutil
from pathlib import Path

import onnx
import torch
from packaging import version
from torch.onnx import export

from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline


is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device)
    output_path = Path(output_path)

    # TEXT ENCODER
    num_tokens = pipeline.text_encoder.config.max_position_embeddings
    text_hidden_size = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer(
        "A sample prompt",
        padding="max_length",
        max_length=pipeline.tokenizer.model_max_length,
        truncation=True,
        return_tensors="pt",
    )
    onnx_export(
        pipeline.text_encoder,
        model_args=(text_input.input_ids.to(device=device, dtype=torch.int32)),
        output_path=output_path / "text_encoder" / "model.onnx",
        ordered_input_names=["input_ids"],
        output_names=["last_hidden_state", "pooler_output"],
        dynamic_axes={
            "input_ids": {0: "batch", 1: "sequence"},
        },
        opset=opset,
    )
    del pipeline.text_encoder

    # UNET
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / "unet" / "model.onnx"
    onnx_export(
        pipeline.unet,
        model_args=(
            torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            torch.randn(2).to(device=device, dtype=dtype),
            torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=unet_path,
        ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"],
        output_names=["out_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
            "timestep": {0: "batch"},
            "encoder_hidden_states": {0: "batch", 1: "sequence"},
        },
        opset=opset,
        use_external_data_format=True,
    )
    unet_model_path = str(unet_path.absolute().as_posix())
    unet_dir = os.path.dirname(unet_model_path)
    unet = onnx.load(unet_model_path)
    # clean up existing tensor files
    shutil.rmtree(unet_dir)
    os.mkdir(unet_dir)
    # collate external tensor files into one
    onnx.save_model(
        unet,
        unet_model_path,
        save_as_external_data=True,
        all_tensors_to_one_file=True,
        location="weights.pb",
        convert_attribute=False,
    )
    del pipeline.unet

    # VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample, return_dict)[0].sample()
    onnx_export(
        vae_encoder,
        model_args=(
            torch.randn(1, vae_in_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_encoder" / "model.onnx",
        ordered_input_names=["sample", "return_dict"],
        output_names=["latent_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )

    # VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    vae_out_channels = vae_decoder.config.out_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_encoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del pipeline.vae

    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
        clip_num_channels = safety_checker.config.vision_config.num_channels
        clip_image_size = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker,
            model_args=(
                torch.randn(1, clip_num_channels, clip_image_size, clip_image_size).to(device=device, dtype=dtype),
                torch.randn(1, vae_sample_size, vae_sample_size, vae_out_channels).to(device=device, dtype=dtype),
            ),
            output_path=output_path / "safety_checker" / "model.onnx",
            ordered_input_names=["clip_input", "images"],
            output_names=["out_images", "has_nsfw_concepts"],
            dynamic_axes={
                "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
                "images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
            },
            opset=opset,
        )
        del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker")
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None

    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder"),
        vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder"),
        text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder"),
        tokenizer=pipeline.tokenizer,
        unet=OnnxRuntimeModel.from_pretrained(output_path / "unet"),
        scheduler=pipeline.scheduler,
        safety_checker=safety_checker,
        feature_extractor=feature_extractor,
        requires_safety_checker=safety_checker is not None,
    )

    onnx_pipeline.save_pretrained(output_path)
    print("ONNX pipeline saved to", output_path)

    del pipeline
    del onnx_pipeline
    _ = OnnxStableDiffusionPipeline.from_pretrained(output_path, provider="CPUExecutionProvider")
    print("ONNX pipeline is loadable")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    args = parser.parse_args()

    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
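# Hedged usage sketch for the script above (the checkpoint name is illustrative):
#
#   python <this_script>.py --model_path runwayml/stable-diffusion-v1-5 \
#       --output_path ./sd_onnx --opset 14
#
# or equivalently, from Python:
#
#   convert_models("runwayml/stable-diffusion-v1-5", "./sd_onnx", opset=14, fp16=False)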
import warnings
from typing import Dict, List, Optional, Tuple

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)


class ByT5Tokenizer(PreTrainedTokenizer):
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=125,
        additional_special_tokens=None,
        **kwargs,
    ) -> None:
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens"
                )

        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits

        # define special tokens dict
        self.special_tokens_encoder: Dict[str, int] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder: Dict[int, str] = {v: k for k, v in self.special_tokens_encoder.items()}

    @property
    def vocab_size(self):
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def _tokenize(self, text: str) -> List[str]:
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens

    def _convert_token_to_id(self, token):
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id

    def _convert_id_to_token(self, index):
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token

    def convert_tokens_to_string(self, tokens):
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string

    # ByT5Tokenizer has no vocab file to save
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        return ()
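# Worked example of the byte-level id arithmetic above (the offset of 3 comes from
# the pad/eos/unk entries in special_tokens_encoder):
#
#   tokenizer = ByT5Tokenizer()
#   tokenizer._convert_token_to_id("a")  # ord("a") = 97, so 97 + 3 = 100
#   tokenizer._convert_id_to_token(100)  # chr(100 - 3) = "a"
#   tokenizer.vocab_size                 # 256 bytes + 3 specials + 125 extra ids = 384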
from dataclasses import asdict, dataclass
from typing import Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}


class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output


@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output


@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False

    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")

        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output


@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)


def get_default_vocab_list():
    return (
        "<cls>", "<pad>", "<eos>", "<unk>",
        "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N",
        "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O",
        ".", "-", "<null_1>", "<mask>",
    )
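# Worked check of the TrunkConfig head arithmetic above (illustrative, not part of
# the original module): sequence_state_dim=1024 with sequence_head_width=32 gives
# 1024 // 32 = 32 heads, and 32 * 32 == 1024, so validation passes; a value such as
# TrunkConfig(sequence_state_dim=1000) would fail the multiple-of-head-width check.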
"""simple docstring"""
import string
import numpy
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
return b if a == 0 else greatest_common_divisor(b % a , snake_case__ )
class lowercase:
'''simple docstring'''
lowercase__ = string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
lowercase__ = numpy.vectorize(lambda __a : x % 36 )
lowercase__ = numpy.vectorize(__a )
def __init__( self: str, a_: numpy.ndarray ):
'''simple docstring'''
_snake_case : Optional[Any] = self.modulus(a_ ) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
_snake_case : Tuple = encrypt_key.shape[0]
def UpperCamelCase_ ( self: Dict, a_: str ):
'''simple docstring'''
return self.key_string.index(a_ )
def UpperCamelCase_ ( self: List[Any], a_: int ):
'''simple docstring'''
return self.key_string[round(a_ )]
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case : Optional[int] = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
_snake_case : str = det % len(self.key_string )
_snake_case : List[Any] = len(self.key_string )
if greatest_common_divisor(a_, len(self.key_string ) ) != 1:
_snake_case : Optional[Any] = (
f"determinant modular {req_l} of encryption key({det}) "
f"is not co prime w.r.t {req_l}.\nTry another key."
)
raise ValueError(a_ )
def UpperCamelCase_ ( self: Union[str, Any], a_: str ):
'''simple docstring'''
_snake_case : Dict = [char for char in text.upper() if char in self.key_string]
_snake_case : str = chars[-1]
while len(a_ ) % self.break_key != 0:
chars.append(a_ )
return "".join(a_ )
def UpperCamelCase_ ( self: List[Any], a_: str ):
'''simple docstring'''
_snake_case : List[Any] = self.process_text(text.upper() )
_snake_case : Any = """"""
for i in range(0, len(a_ ) - self.break_key + 1, self.break_key ):
_snake_case : List[Any] = text[i : i + self.break_key]
_snake_case : Dict = [self.replace_letters(a_ ) for char in batch]
_snake_case : List[Any] = numpy.array([vec] ).T
_snake_case : List[Any] = self.modulus(self.encrypt_key.dot(a_ ) ).T.tolist()[
0
]
_snake_case : str = """""".join(
self.replace_digits(a_ ) for num in batch_encrypted )
encrypted += encrypted_batch
return encrypted
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : str = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
_snake_case : Any = det % len(self.key_string )
_snake_case : Dict = None
for i in range(len(self.key_string ) ):
if (det * i) % len(self.key_string ) == 1:
_snake_case : List[str] = i
break
_snake_case : List[Any] = (
det_inv
* numpy.linalg.det(self.encrypt_key )
* numpy.linalg.inv(self.encrypt_key )
)
return self.to_int(self.modulus(a_ ) )
def UpperCamelCase_ ( self: int, a_: str ):
'''simple docstring'''
_snake_case : List[str] = self.make_decrypt_key()
_snake_case : List[str] = self.process_text(text.upper() )
_snake_case : Any = """"""
for i in range(0, len(a_ ) - self.break_key + 1, self.break_key ):
_snake_case : Union[str, Any] = text[i : i + self.break_key]
_snake_case : int = [self.replace_letters(a_ ) for char in batch]
_snake_case : Optional[Any] = numpy.array([vec] ).T
_snake_case : List[Any] = self.modulus(decrypt_key.dot(a_ ) ).T.tolist()[0]
_snake_case : Dict = """""".join(
self.replace_digits(a_ ) for num in batch_decrypted )
decrypted += decrypted_batch
return decrypted
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : List[str] = int(input("""Enter the order of the encryption key: """ ) )
_snake_case : int = []
print("""Enter each row of the encryption key with space separated integers""" )
for _ in range(snake_case__ ):
_snake_case : List[Any] = [int(snake_case__ ) for x in input().split()]
hill_matrix.append(snake_case__ )
_snake_case : Any = HillCipher(numpy.array(snake_case__ ) )
print("""Would you like to encrypt or decrypt some text? (1 or 2)""" )
_snake_case : Any = input("""\n1. Encrypt\n2. Decrypt\n""" )
if option == "1":
_snake_case : Optional[Any] = input("""What text would you like to encrypt?: """ )
print("""Your encrypted text is:""" )
print(hc.encrypt(snake_case__ ) )
elif option == "2":
_snake_case : List[Any] = input("""What text would you like to decrypt?: """ )
print("""Your decrypted text is:""" )
print(hc.decrypt(snake_case__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
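# Worked example of the modular arithmetic above, using a 2x2 key over the
# 36-symbol alphabet (A-Z then 0-9). For K = [[2, 5], [1, 6]] and block "HE",
# i.e. the index vector [7, 4]:
#
#   K @ [7, 4]^T = [2*7 + 5*4, 1*7 + 6*4] = [34, 31]  ->  symbols "8" and "5"
#
#   hc = HillCipher(numpy.array([[2, 5], [1, 6]]))
#   hc.encrypt("he")  # "85", matching the hand computation above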
import argparse

import torch

from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--mobilebert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained MobileBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
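# Hedged invocation sketch for the converter above (all paths are placeholders):
#
#   python <this_script>.py \
#       --tf_checkpoint_path mobilebert/model.ckpt \
#       --mobilebert_config_file mobilebert/config.json \
#       --pytorch_dump_path mobilebert/pytorch_model.bin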
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        # k: Harris free parameter, conventionally 0.04 or 0.06
        # window_size: side length of the summation window
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self):
        return str(self.k)

    def detect(self, img_path: str):
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
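# The response computed in detect() above is the standard Harris measure
#     R = det(M) - k * trace(M)**2,   M = [[wxx, wxy], [wxy, wyy]],
# where wxx/wyy/wxy are the windowed sums of gradient products: large positive R
# flags a corner, negative R an edge, and |R| near zero a flat region.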
import inspect
import os
import unittest

import torch

import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment


class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
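# Worked example of pad_across_processes as exercised above (illustrative): with two
# processes, ranks 0 and 1 hold tensors of shape (2, 10) and (3, 10) respectively
# (process_index + 2 rows). After padding, both ranks report shape (3, 10); rank 0's
# extra row is zero-filled at the end, or at the start when pad_first=True.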
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")
MAPPING_QUANTIZER = {
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
MAPPING_ENCODER = {
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
MAPPING_ENCODER_48K = {
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
MAPPING_DECODER = {
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
MAPPING_DECODER_48K = {
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Walk ``key`` through the HF model and copy ``value`` into the matching parameter."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}")
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    """Return True if ``name`` matches any of the ignore patterns."""
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
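
# A small sanity-check sketch of the three pattern forms handled above
# (the example state-dict names are hypothetical; defined only, never run):
def _should_ignore_examples():
    assert should_ignore("decoder.model.0.conv.bias", ["decoder.model.*"])           # trailing ".*" -> prefix match
    assert should_ignore("encoder.model.13.lstm.weight_ih_l0", ["encoder.*.lstm"])   # ".*." -> prefix and suffix
    assert should_ignore("quantizer.vq.layers.0._codebook.embed", ["_codebook"])     # plain substring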
def recursively_load_weights(orig_dict, hf_model, model_name):
    """Map every original checkpoint tensor onto the HF model, tracking unused weights."""
    unused_weights = []
    if model_name in ["encodec_24khz", "encodec_32khz"]:
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")
    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue
        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix
            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue
                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    """Build an EncodecConfig for ``model_name``, load the original weights and save the HF model."""
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()
    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")
    model = EncodecModel(config)
    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels, sampling_rate=config.sampling_rate, chunk_length_s=config.chunk_length_s, overlap=config.overlap)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
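
# Example invocation (the script file name and local paths are illustrative;
# the source checkpoints are listed at the top of this script):
#
#   python convert_encodec_checkpoint_to_pytorch.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz-d7cc33bc.th \
#       --pytorch_dump_folder_path ./encodec_24khz_converted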
| 628 | 0 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"])
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_generation(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = Accelerator()
SCREAMING_SNAKE_CASE_ = (accelerator.state.process_index + 2, 10)
SCREAMING_SNAKE_CASE_ = torch.randint(0, 10, shape).to(accelerator.device)
SCREAMING_SNAKE_CASE_ = ''
SCREAMING_SNAKE_CASE_ = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
SCREAMING_SNAKE_CASE_ = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
SCREAMING_SNAKE_CASE_ = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 707 |
def is_balanced(s: str) -> bool:
    """Return True if every bracket in ``s`` is opened and closed in matching order."""
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}
    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False
    return len(stack) == 0


def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
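
def _examples() -> None:
    # Illustrative checks for the stack-based matcher above (defined, not run).
    assert is_balanced("{[()]}")
    assert not is_balanced("([)]")  # crossed pair
    assert not is_balanced("((")    # opener left unclosed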
| 201 | 0 |
from math import pow, sqrt


def validate(*values: float) -> bool:
    # All inputs must be positive for Graham's law of effusion to apply.
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must be greater than 0.")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )
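
def _graham_example() -> None:
    # Worked example with illustrative molar masses: hydrogen (~2.016 g/mol)
    # effuses about 4x faster than oxygen (~31.998 g/mol), since
    # rate_1 / rate_2 = sqrt(M_2 / M_1) = sqrt(31.998 / 2.016) ~= 3.984.
    ratio = effusion_ratio(2.016, 31.998)
    assert isinstance(ratio, float) and abs(ratio - 3.984) < 0.01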
| 42 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}

    def setUp(self):
        super().setUp()
        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_tokens = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_tokens)
        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)
        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)

    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        tokens = tokenizer.encode(input_text)
        output_text = tokenizer.decode(tokens)
        self.assertEqual(output_text, expected_text)

    @slow
    def test_prefix_input_sequence_construction(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"
        tokens_1 = tokenizer.encode(prefix_text + input_text)
        tokens_2 = tokenizer.encode("", prefix_text=prefix_text + input_text)
        tokens_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        output_text_1 = tokenizer.decode(tokens_1)
        output_text_2 = tokenizer.decode(tokens_2)
        output_text_3 = tokenizer.decode(tokens_3)
        self.assertEqual(output_text_1, expected_text)
        self.assertEqual(output_text_2, expected_text)
        self.assertEqual(output_text_3, expected_text)

    @slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_ids_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_ids_2 = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids
        type_ids_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_ids_1, expected_mask_1)
        self.assertListEqual(type_ids_2, expected_mask_2)
        self.assertListEqual(type_ids_3, expected_mask_3)

    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        x_token_1 = tokenizer.encode("あンいワ")
        x_token_2 = tokenizer.encode("", prefix_text="あンいワ")
        x_token_3 = tokenizer.encode("いワ", prefix_text="あン")
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_2[1], x_token_2[-1])  # SEG token
        self.assertEqual(x_token_3[1], x_token_3[3])  # SEG token

    @slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        input_pairs = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)
        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs)
        self.assertListEqual(x_token.token_type_ids, expected_typeids)
        self.assertListEqual(x_token.attention_mask, expected_attmask)
        self.assertListEqual(x_token_2.input_ids, expected_outputs)
        self.assertListEqual(x_token_2.token_type_ids, expected_typeids)
        self.assertListEqual(x_token_2.attention_mask, expected_attmask)

    def test_conversion_reversible(self):
        pass

    def test_padding_different_model_input_name(self):
        pass
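
# Shape sketch of the prefix-LM masks asserted above (derived from the test's
# own arithmetic, not from tokenizer internals): for a prefix of P tokens and a
# continuation of T tokens, `token_type_ids` comes out as
#   [1] + [1] * P + [0] * (T + 1)
# i.e. 1 over the leading separator and the bidirectional prefix, 0 over the
# autoregressive continuation.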
| 42 | 1 |
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(taken: int = 20) -> str:
    """Expected number of distinct colours among ``taken`` balls drawn at random."""
    total = math.comb(NUM_BALLS, taken)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, taken)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
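
# The counting argument behind `solution`, by linearity of expectation: with
# 70 balls (10 per colour) and 20 drawn, a fixed colour is entirely absent with
# probability C(60, 20) / C(70, 20), so the expected number of distinct colours
# is 7 * (1 - C(60, 20) / C(70, 20)) ~= 6.818741802.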
| 720 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
lowerCamelCase__ = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""
class snake_case__ ( unittest.TestCase , lowercase_):
'''simple docstring'''
def __lowercase ( self ) -> List[str]:
'''simple docstring'''
__snake_case :int = load_tool("""text-question-answering""" )
self.tool.setup()
__snake_case :Tuple = load_tool("""text-question-answering""" , remote=a__ )
def __lowercase ( self ) -> Any:
'''simple docstring'''
__snake_case :List[str] = self.tool(a__ , """What did Hugging Face do in April 2021?""" )
self.assertEqual(a__ , """launched the BigScience Research Workshop""" )
def __lowercase ( self ) -> Any:
'''simple docstring'''
__snake_case :List[str] = self.remote_tool(a__ , """What did Hugging Face do in April 2021?""" )
self.assertEqual(a__ , """launched the BigScience Research Workshop""" )
def __lowercase ( self ) -> Dict:
'''simple docstring'''
__snake_case :Optional[int] = self.tool(text=a__ , question="""What did Hugging Face do in April 2021?""" )
self.assertEqual(a__ , """launched the BigScience Research Workshop""" )
def __lowercase ( self ) -> Tuple:
'''simple docstring'''
__snake_case :List[str] = self.remote_tool(text=a__ , question="""What did Hugging Face do in April 2021?""" )
self.assertEqual(a__ , """launched the BigScience Research Workshop""" )
| 291 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime, using 6k +/- 1 trial division."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Return the first ``n`` odd composites that cannot be written as prime + 2 * i * i."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")
    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums
    return []


def solution() -> int:
    """Smallest odd composite that is not the sum of a prime and twice a square."""
    return compute_nums(1)[0]


if __name__ == "__main__":
    print(f"{solution() = }")
| 28 | """simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    def __init__(self, parent, batch_size=13, image_size=10, num_channels=3, patch_size=2, num_frames=2, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, num_labels=10, initializer_range=0.02, attention_type="divided_space_time", scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels
        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_frames=self.num_frames, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, attention_type=self.attention_type)
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1])
                out_len = len(outputs)
                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))
                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1])

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)
            seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size])

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset")
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device)
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
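
# End-to-end sketch of what CLIPProcessor bundles (the checkpoint name is just
# an example):
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
# `batch` then carries input_ids / attention_mask from the tokenizer plus
# pixel_values from the image processor, ready for CLIPModel(**batch).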
| 721 |
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]


class TokenizedDataset(IterableDataset):
    """Yields one tokenized prompt per task, repeated ``n_copies`` times."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }


class EndOfFunctionCriteria(StoppingCriteria):
    """Stops generation once every sequence has produced an end-of-function string."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)


def remove_last_block(string):
    """Drop the trailing partial block that follows the last EOF string."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple completions per task and collect them by task id across processes."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs)
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id)
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)
    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()
    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"
    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()
    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)
    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }
    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")
    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size
    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)
    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation.")
        raise exception
    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)
    generations = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs)
    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)
        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=generations, num_workers=args.num_workers)
        print(f"Results: {pass_at_k}")
        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 696 | 0 |
"""simple docstring"""
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}


def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }
    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script"))))
    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)
    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0
    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )
    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode(
            "unicode-escape")
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode(
            "unicode-escape")
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg")
    return index
if __name__ == "__main__":
try:
_A = download_images_from_google_query(sys.argv[1])
print(F'{image_count} images were downloaded to disk.')
except IndexError:
print('Please provide a search term.')
raise
| 159 |
def solution(n: int = 1000) -> int:
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)
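# Convergents of the continued fraction for sqrt(2) satisfy
# n_{k+1} = n_k + 2 * d_k and d_{k+1} = n_k + d_k (3/2 -> 7/5 -> 17/12 -> ...);
# the solution counts, over the first 1000 expansions, those whose numerator
# has more digits than the denominator (Project Euler 57).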
if __name__ == "__main__":
print(F"{solution() = }") | 108 | 0 |
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path) -> None:
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCamelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path) | 69 |
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
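# Worked example from the Project Euler 43 statement: 1406357289 is 0-9
# pandigital and its substrings 406, 063, 635, 357, 572, 728, 289 are
# divisible by 2, 3, 5, 7, 11, 13, 17 respectively.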
if __name__ == "__main__":
print(F"""{solution() = }""") | 69 | 1 |
'''simple docstring'''
demo_graph = {
    'A': ['B', 'C', 'E'],
    'B': ['A', 'D', 'E'],
    'C': ['A', 'F', 'G'],
    'D': ['B'],
    'E': ['A', 'B', 'D'],
    'F': ['C'],
    'G': ['C'],
}


def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(queue)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, 'G', 'D')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, 'G', 'D')) # returns 4 | 44 |
'''simple docstring'''
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}


def next_term(a_i, k, i, n):
    # ds_b -> digitsum(b); c -> the value of the low k digits
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))
    diff, dn = 0, 0
    max_dn = n - i
    sub_memo = memo.get(ds_b)
    if sub_memo is not None:
        jumps = sub_memo.get(c)
        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break
            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            jumps = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo
    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn
    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped
            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped
    jumps = sub_memo[c]
    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1
    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)


def compute(a_i, k, i, n):
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])
    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]
    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]
        if addend > 0:
            break
    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10
        if addend == 0:
            break
    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break
    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
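# The sequence is a(1) = 1, a(n + 1) = a(n) + digitsum(a(n)) (Project Euler 551);
# caching "jumps" keyed by digitsum(b) and the low digits c lets the code reach
# a(10**15) without generating every term.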
if __name__ == "__main__":
print(f'''{solution() = }''') | 44 | 1 |
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=0 ):
# Format the message.
if name is None:
__snake_case : Optional[Any] = None
else:
__snake_case : Dict = "." * max(0 , spaces - 2 ) + "# {:" + str(5_0 - spaces ) + "s}"
__snake_case : Dict = fmt.format(__lowerCamelCase )
# Print and recurse (if needed).
if isinstance(__lowerCamelCase , __lowerCamelCase ):
if msg is not None:
print(__lowerCamelCase )
for k in val.keys():
recursive_print(__lowerCamelCase , val[k] , spaces + 2 )
elif isinstance(__lowerCamelCase , torch.Tensor ):
print(__lowerCamelCase , ":" , val.size() )
else:
print(__lowerCamelCase , ":" , __lowerCamelCase )
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
# Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
# for compatibility with later versions of NVIDIA Megatron-LM.
# The inverse operation is performed inside Megatron-LM to read checkpoints:
# https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
# If param is the weight tensor of the self-attention block, the returned tensor
# will have to be transposed one more time to be read by HuggingFace GPT2.
__snake_case : Union[str, Any] = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
__snake_case : List[str] = (num_heads, hidden_size, num_splits) + input_shape[1:]
__snake_case : int = param.view(*__lowerCamelCase )
__snake_case : List[Any] = param.transpose(0 , 2 )
__snake_case : Optional[int] = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
__snake_case : Optional[Any] = (num_heads, num_splits, hidden_size) + input_shape[1:]
__snake_case : List[str] = param.view(*__lowerCamelCase )
__snake_case : str = param.transpose(0 , 1 ).contiguous()
__snake_case : Any = param.view(*__lowerCamelCase )
return param
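# A quick shape sanity check for the reordering above (hypothetical sizes, not
# from the original script; the call sites below name the function
# fix_query_key_value_ordering): for checkpoint_version >= 2.0 the tensor is
# viewed as [heads, splits, head_dim, ...], the first two axes are swapped, and
# it is flattened back, so the overall shape is preserved:
#     param = torch.randn(16 * 3 * 64, 1024)  # heads=16, splits=3, head_dim=64
#     reordered = fix_query_key_value_ordering(param, 2.0, 3, 16, 64)
#     assert reordered.shape == param.shape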
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
# The converted output model.
__snake_case : List[Any] = {}
# old versions did not store training args
__snake_case : List[str] = input_state_dict.get("args" , __lowerCamelCase )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
__snake_case : Union[str, Any] = ds_args.padded_vocab_size
__snake_case : int = ds_args.max_position_embeddings
__snake_case : List[Any] = ds_args.hidden_size
__snake_case : List[Any] = ds_args.num_layers
__snake_case : Optional[Any] = ds_args.num_attention_heads
__snake_case : Dict = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
__snake_case : str = config.n_head
# The hidden_size per head.
__snake_case : int = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
__snake_case : Tuple = input_state_dict["checkpoint_version"]
else:
__snake_case : Any = 0.0
# The model.
__snake_case : Dict = input_state_dict["model"]
# The language model.
__snake_case : str = model["language_model"]
# The embeddings.
__snake_case : List[str] = lm["embedding"]
# The word embeddings.
__snake_case : Any = embeddings["word_embeddings"]["weight"]
# Truncate the embedding table to vocab_size rows.
__snake_case : Tuple = word_embeddings[: config.vocab_size, :]
__snake_case : Tuple = word_embeddings
# The position embeddings.
__snake_case : Tuple = embeddings["position_embeddings"]["weight"]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
__snake_case : Dict = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F'pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match' )
# Store the position embeddings.
__snake_case : Union[str, Any] = pos_embeddings
# The transformer.
__snake_case : List[str] = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]
# The regex to extract layer names.
__snake_case : Optional[int] = re.compile(R"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)" )
# The simple map of names for "automated" rules.
__snake_case : Optional[Any] = {
"attention.dense": ".attn.c_proj.",
"self_attention.dense": ".attn.c_proj.",
"mlp.dense_h_to_4h": ".mlp.c_fc.",
"mlp.dense_4h_to_h": ".mlp.c_proj.",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
__snake_case : List[str] = layer_re.match(__lowerCamelCase )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
__snake_case : int = int(m.group(1 ) )
# The name of the operation.
__snake_case : Dict = m.group(2 )
# Is it a weight or a bias?
__snake_case : Any = m.group(3 )
# The name of the layer.
__snake_case : List[Any] = F'transformer.h.{layer_idx}'
# For layernorm(s), simply store the layer norm.
if op_name.endswith("layernorm" ):
__snake_case : Optional[int] = "ln_1" if op_name.startswith("input" ) else "ln_2"
__snake_case : Optional[Any] = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
__snake_case : Dict = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.float16 ) ).view(
1 , 1 , __lowerCamelCase , __lowerCamelCase )
__snake_case : List[str] = causal_mask
# Insert a "dummy" tensor for masked_bias.
__snake_case : Union[str, Any] = torch.tensor(-1e4 , dtype=torch.float16 )
__snake_case : Dict = masked_bias
__snake_case : Tuple = fix_query_key_value_ordering(__lowerCamelCase , __lowerCamelCase , 3 , __lowerCamelCase , __lowerCamelCase )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
__snake_case : List[Any] = out_val.transpose(0 , 1 ).contiguous()
# Store.
__snake_case : Optional[int] = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
__snake_case : Any = fix_query_key_value_ordering(__lowerCamelCase , __lowerCamelCase , 3 , __lowerCamelCase , __lowerCamelCase )
# Store. No change of shape.
__snake_case : Any = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
__snake_case : str = megatron_to_transformers[op_name]
__snake_case : Union[str, Any] = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
__snake_case : Any = megatron_to_transformers[op_name]
__snake_case : List[Any] = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
__snake_case : List[Any] = transformer["final_layernorm.weight"]
__snake_case : List[str] = transformer["final_layernorm.bias"]
# For the LM head, transformers expects the matrix tied to the word embeddings.
__snake_case : Tuple = word_embeddings
# It should be done!
return output_state_dict
def lowerCAmelCase_ ( ):
# Create the argument parser.
__snake_case : int = argparse.ArgumentParser()
parser.add_argument("--print-checkpoint-structure" , action="store_true" )
parser.add_argument(
"path_to_checkpoint" , type=__lowerCamelCase , help="Path to the checkpoint file (.zip archive or direct .pt file)" , )
parser.add_argument(
"--config_file" , default="" , type=__lowerCamelCase , help="An optional config json file describing the pre-trained model." , )
__snake_case : Optional[Any] = parser.parse_args()
# Extract the basename.
__snake_case : int = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(F'Extracting PyTorch state dictionary from {args.path_to_checkpoint}' )
if args.path_to_checkpoint.endswith(".zip" ):
with zipfile.ZipFile(args.path_to_checkpoint , "r" ) as checkpoint:
with checkpoint.open("release/mp_rank_00/model_optim_rng.pt" ) as pytorch_dict:
__snake_case : List[Any] = torch.load(__lowerCamelCase , map_location="cpu" )
else:
__snake_case : Any = torch.load(args.path_to_checkpoint , map_location="cpu" )
__snake_case : List[str] = input_state_dict.get("args" , __lowerCamelCase )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
__snake_case : List[str] = "gelu_fast"
elif ds_args.openai_gelu:
__snake_case : Optional[int] = "gelu_new"
else:
__snake_case : int = "gelu"
else:
# in the very early days this used to be "gelu_new"
__snake_case : Tuple = "gelu_new"
# Spell out all parameters in case the defaults change.
__snake_case : Dict = GPT2Config(
vocab_size=5_0_2_5_7 , n_positions=1_0_2_4 , n_embd=1_0_2_4 , n_layer=2_4 , n_head=1_6 , n_inner=4_0_9_6 , activation_function=__lowerCamelCase , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.0_2 , summary_type="cls_index" , summary_use_proj=__lowerCamelCase , summary_activation=__lowerCamelCase , summary_proj_to_labels=__lowerCamelCase , summary_first_dropout=0.1 , scale_attn_weights=__lowerCamelCase , use_cache=__lowerCamelCase , bos_token_id=5_0_2_5_6 , eos_token_id=5_0_2_5_6 , )
else:
__snake_case : List[str] = GPT2Config.from_json_file(args.config_file )
__snake_case : Tuple = ["GPT2LMHeadModel"]
# Convert.
print("Converting" )
__snake_case : Union[str, Any] = convert_megatron_checkpoint(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(__lowerCamelCase , __lowerCamelCase )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
__snake_case : Union[str, Any] = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
__snake_case : Optional[int] = "gpt2"
elif tokenizer_type == "PretrainedFromHF":
__snake_case : Any = ds_args.tokenizer_name_or_path
else:
raise ValueError(F'Unrecognized tokenizer_type {tokenizer_type}' )
else:
__snake_case : Optional[int] = "gpt2"
__snake_case : Optional[Any] = AutoTokenizer.from_pretrained(__lowerCamelCase )
__snake_case : List[str] = type(__lowerCamelCase ).__name__
__snake_case : Optional[int] = tokenizer_class
# Store the config to file.
print("Saving config" )
config.save_pretrained(__lowerCamelCase )
# Save tokenizer based on args
print(F'Adding {tokenizer_class} tokenizer files' )
tokenizer.save_pretrained(__lowerCamelCase )
# Store the state_dict to file.
__snake_case : Optional[int] = os.path.join(__lowerCamelCase , "pytorch_model.bin" )
print(F'Saving checkpoint to "{output_checkpoint_file}"' )
torch.save(__lowerCamelCase , __lowerCamelCase )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 714 |
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
_snake_case : int = {
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
_snake_case : Dict = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase=False ):
__snake_case , __snake_case : Any = create_model(
"HTSAT-tiny" , "roberta" , __lowerCamelCase , precision="fp32" , device="cuda:0" if torch.cuda.is_available() else "cpu" , enable_fusion=__lowerCamelCase , fusion_type="aff_2d" if enable_fusion else None , )
return model, model_cfg
def lowerCAmelCase_ ( __lowerCamelCase ):
__snake_case : int = {}
__snake_case : List[Any] = R".*sequential.(\d+).*"
__snake_case : Any = R".*_projection.(\d+).*"
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
__snake_case : Optional[int] = key.replace(__lowerCamelCase , __lowerCamelCase )
if re.match(__lowerCamelCase , __lowerCamelCase ):
# replace sequential layers with list
__snake_case : List[Any] = re.match(__lowerCamelCase , __lowerCamelCase ).group(1 )
__snake_case : str = key.replace(F'sequential.{sequential_layer}.' , F'layers.{int(sequential_layer )//3}.linear.' )
elif re.match(__lowerCamelCase , __lowerCamelCase ):
__snake_case : Any = int(re.match(__lowerCamelCase , __lowerCamelCase ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
__snake_case : List[str] = 1 if projecton_layer == 0 else 2
__snake_case : Tuple = key.replace(F'_projection.{projecton_layer}.' , F'_projection.linear{transformers_projection_layer}.' )
if "audio" and "qkv" in key:
# split qkv into query key and value
__snake_case : List[str] = value
__snake_case : Optional[int] = mixed_qkv.size(0 ) // 3
__snake_case : List[str] = mixed_qkv[:qkv_dim]
__snake_case : int = mixed_qkv[qkv_dim : qkv_dim * 2]
__snake_case : str = mixed_qkv[qkv_dim * 2 :]
__snake_case : int = query_layer
__snake_case : Tuple = key_layer
__snake_case : List[str] = value_layer
else:
__snake_case : Tuple = value
return model_state_dict
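# Example of the renaming above (hypothetical key, assuming three-layer MLP
# projections): "text_branch.sequential.3.weight" first becomes
# "text_model.sequential.3.weight" via KEYS_TO_MODIFY_MAPPING, then
# "text_model.layers.1.linear.weight", since sequential index 3 maps to
# layer 3 // 3 = 1.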
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=False ):
__snake_case , __snake_case : List[str] = init_clap(__lowerCamelCase , enable_fusion=__lowerCamelCase )
clap_model.eval()
__snake_case : Union[str, Any] = clap_model.state_dict()
__snake_case : Any = rename_state_dict(__lowerCamelCase )
__snake_case : Dict = ClapConfig()
__snake_case : Dict = enable_fusion
__snake_case : Optional[Any] = ClapModel(__lowerCamelCase )
# ignore the spectrogram embedding layer
model.load_state_dict(__lowerCamelCase , strict=__lowerCamelCase )
model.save_pretrained(__lowerCamelCase )
transformers_config.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
_snake_case : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
_snake_case : Tuple = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 203 | 0 |
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_snake_case = _symbol_database.Default()
_snake_case = _descriptor_pool.Default().AddSerializedFile(
b"""\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"""
)
_snake_case = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, """sentencepiece_model_pb2""", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
_snake_case = None
_snake_case = b"""H\003"""
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
_snake_case = 45
_snake_case = 1581
_snake_case = 1517
_snake_case = 1570
_snake_case = 1584
_snake_case = 1793
_snake_case = 1795
_snake_case = 1916
_snake_case = 1864
_snake_case = 1905
_snake_case = 1919
_snake_case = 2429
_snake_case = 2208
_snake_case = 2418
_snake_case = 2323
_snake_case = 2407
# @@protoc_insertion_point(module_scope)
| 655 |
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class lowerCAmelCase ( unittest.TestCase ):
@slow
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModel.from_pretrained(_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModel.from_pretrained(_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelForPreTraining.from_pretrained(_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelForPreTraining.from_pretrained(_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelForCausalLM.from_pretrained(_lowercase , from_pt=_lowercase )
lowercase__ , lowercase__ = TFAutoModelForCausalLM.from_pretrained(
_lowercase , output_loading_info=_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelForCausalLM.from_pretrained(_lowercase , from_tf=_lowercase )
lowercase__ , lowercase__ = AutoModelForCausalLM.from_pretrained(
_lowercase , output_loading_info=_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :Optional[Any] ):
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelWithLMHead.from_pretrained(_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelWithLMHead.from_pretrained(_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelForMaskedLM.from_pretrained(_lowercase , from_pt=_lowercase )
lowercase__ , lowercase__ = TFAutoModelForMaskedLM.from_pretrained(
_lowercase , output_loading_info=_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelForMaskedLM.from_pretrained(_lowercase , from_tf=_lowercase )
lowercase__ , lowercase__ = AutoModelForMaskedLM.from_pretrained(
_lowercase , output_loading_info=_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelForSeqaSeqLM.from_pretrained(_lowercase , from_pt=_lowercase )
lowercase__ , lowercase__ = TFAutoModelForSeqaSeqLM.from_pretrained(
_lowercase , output_loading_info=_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelForSeqaSeqLM.from_pretrained(_lowercase , from_tf=_lowercase )
lowercase__ , lowercase__ = AutoModelForSeqaSeqLM.from_pretrained(
_lowercase , output_loading_info=_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :str ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelForSequenceClassification.from_pretrained(_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelForSequenceClassification.from_pretrained(_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :str ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelForQuestionAnswering.from_pretrained(_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelForQuestionAnswering.from_pretrained(_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
def UpperCAmelCase ( self :List[Any] ):
'''simple docstring'''
lowercase__ = TFAutoModelWithLMHead.from_pretrained(_lowercase , from_pt=_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_lowercase ) , 1_44_10 )
lowercase__ = AutoModelWithLMHead.from_pretrained(_lowercase , from_tf=_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_lowercase ) , 1_44_10 )
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
lowercase__ = TFAutoModelWithLMHead.from_pretrained(_lowercase , from_pt=_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_lowercase ) , 1_44_10 )
lowercase__ = AutoModelWithLMHead.from_pretrained(_lowercase , from_tf=_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_lowercase ) , 1_44_10 )
| 655 | 1 |
def binomial_coefficient(n, r):
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
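# Pascal-row recurrence C(i, j) = C(i - 1, j) + C(i - 1, j - 1) in O(r) extra
# space; the call below prints C(10, 5) == 252.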
print(binomial_coefficient(n=10, r=5))
| 717 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
UpperCAmelCase_ = '''scheduler_config.json'''
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 5
@dataclass
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 42
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = SCHEDULER_CONFIG_NAME
SCREAMING_SNAKE_CASE_ = ['dtype']
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = True
@classmethod
def __lowerCamelCase( cls , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__=False , **SCREAMING_SNAKE_CASE__ , ):
"""simple docstring"""
_snake_case , _snake_case : List[str] = cls.load_config(
pretrained_model_name_or_path=SCREAMING_SNAKE_CASE__ , subfolder=SCREAMING_SNAKE_CASE__ , return_unused_kwargs=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
_snake_case , _snake_case : Dict = cls.from_config(SCREAMING_SNAKE_CASE__ , return_unused_kwargs=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
if hasattr(SCREAMING_SNAKE_CASE__ , """create_state""" ) and getattr(SCREAMING_SNAKE_CASE__ , """has_state""" , SCREAMING_SNAKE_CASE__ ):
_snake_case : int = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def __lowerCamelCase( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = False , **SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
self.save_config(save_directory=SCREAMING_SNAKE_CASE__ , push_to_hub=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@property
def __lowerCamelCase( self ):
"""simple docstring"""
return self._get_compatibles()
@classmethod
def __lowerCamelCase( cls ):
"""simple docstring"""
_snake_case : Dict = list(set([cls.__name__] + cls._compatibles ) )
_snake_case : Union[str, Any] = importlib.import_module(__name__.split(""".""" )[0] )
_snake_case : List[str] = [
getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for c in compatible_classes_str if hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
]
return compatible_classes
def UpperCAmelCase ( A__ , A__ ) -> jnp.ndarray:
assert len(A__ ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(A__ ) - x.ndim) ) , A__ )
def UpperCAmelCase ( A__ , A__=0.999 , A__=jnp.floataa ) -> jnp.ndarray:
def alpha_bar(A__ ):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
_snake_case : List[Any] = []
for i in range(A__ ):
_snake_case : Optional[Any] = i / num_diffusion_timesteps
_snake_case : str = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(A__ ) / alpha_bar(A__ ) , A__ ) )
return jnp.array(A__ , dtype=A__ )
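# A note on the schedule above: alpha_bar(t) = cos(((t + 0.008) / 1.008) * pi / 2) ** 2,
# so each beta_i = min(1 - alpha_bar((i + 1) / T) / alpha_bar(i / T), max_beta); this is
# the squared-cosine ("cosine") noise schedule used by GLIDE-style models.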
@flax.struct.dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = 42
@classmethod
def __lowerCamelCase( cls , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_snake_case : List[Any] = scheduler.config
if config.trained_betas is not None:
_snake_case : Optional[Any] = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
_snake_case : str = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_snake_case : Tuple = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_snake_case : Any = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
f'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}''' )
_snake_case : str = 1.0 - betas
_snake_case : Dict = jnp.cumprod(SCREAMING_SNAKE_CASE__ , axis=0 )
return cls(
alphas=SCREAMING_SNAKE_CASE__ , betas=SCREAMING_SNAKE_CASE__ , alphas_cumprod=SCREAMING_SNAKE_CASE__ , )
def UpperCAmelCase ( A__ , A__ , A__ , A__ ) -> List[str]:
_snake_case : Union[str, Any] = state.alphas_cumprod
_snake_case : Tuple = alphas_cumprod[timesteps] ** 0.5
_snake_case : Union[str, Any] = sqrt_alpha_prod.flatten()
_snake_case : Optional[Any] = broadcast_to_shape_from_left(A__ , original_samples.shape )
_snake_case : Tuple = (1 - alphas_cumprod[timesteps]) ** 0.5
_snake_case : List[str] = sqrt_one_minus_alpha_prod.flatten()
_snake_case : Any = broadcast_to_shape_from_left(A__ , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def UpperCAmelCase ( A__ , A__ , A__ , A__ ) -> Optional[int]:
_snake_case , _snake_case : Optional[Any] = get_sqrt_alpha_prod(A__ , A__ , A__ , A__ )
_snake_case : List[str] = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
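# The two coefficients implement the standard diffusion forward process:
# x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise.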
def UpperCAmelCase ( A__ , A__ , A__ , A__ ) -> int:
_snake_case , _snake_case : Union[str, Any] = get_sqrt_alpha_prod(A__ , A__ , A__ , A__ )
_snake_case : List[str] = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
| 519 | 0 |
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 549 | import os
import pytest
from attr import dataclass
SCREAMING_SNAKE_CASE__ : int = "us-east-1" # defaults region
@dataclass
class snake_case :
lowercase_ = 42
lowercase_ = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
lowercase_ = {
'task_name': 'mnli',
'per_device_train_batch_size': 16,
'per_device_eval_batch_size': 16,
'do_train': True,
'do_eval': True,
'do_predict': True,
'output_dir': '/opt/ml/model',
'overwrite_output_dir': True,
'max_steps': 500,
'save_steps': 5_500,
}
lowercase_ = {**hyperparameters, 'max_steps': 1_000}
@property
def __lowercase( self : List[str] )-> str:
"""simple docstring"""
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def __lowercase( self : Union[str, Any] )-> str:
"""simple docstring"""
return F'''{self.framework}-transfromers-test'''
@property
def __lowercase( self : int )-> str:
"""simple docstring"""
return F'''./tests/sagemaker/scripts/{self.framework}'''
@property
def __lowercase( self : Tuple )-> str:
"""simple docstring"""
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='class' )
def _a ( lowercase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = SageMakerTestEnvironment(framework=request.cls.framework )
| 85 | 0 |
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano's theorem: a root is only bracketed if f(a) and f(b) differ in sign
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
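# Both demo calls below bracket the positive root of 10 - x**2 (f(-2) > 0 > f(5)
# and f(0) > 0 > f(6)), so each converges to approximately sqrt(10) ≈ 3.162
# within the 0.01 tolerance.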
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 360 |
def jaccard_similarity(set_a, set_b, alternative_union=False):
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union
    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None
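# For the demo sets below: the intersection {c, d, e} has 3 elements and the
# union has 8, so jaccard_similarity(set_a, set_b) == 3 / 8 == 0.375.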
if __name__ == "__main__":
A_ = {"a", "b", "c", "d", "e"}
A_ = {"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))
| 360 | 1 |
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ) -> Any:
SCREAMING_SNAKE_CASE_ : List[str] = test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
'`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '
f'{test_file} instead.' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = components[-1]
if not test_fn.endswith('py' ):
raise ValueError(f'`test_file` should be a python file. Got {test_fn} instead.' )
if not test_fn.startswith('test_modeling_' ):
raise ValueError(
f'`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.' )
SCREAMING_SNAKE_CASE_ : Dict = components[:-1] + [test_fn.replace('.py' , '' )]
SCREAMING_SNAKE_CASE_ : List[str] = '.'.join(SCREAMING_SNAKE_CASE )
return test_module_path
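# For example, "tests/models/bert/test_modeling_bert.py" becomes the module
# path "tests.models.bert.test_modeling_bert".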
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ) -> int:
SCREAMING_SNAKE_CASE_ : Optional[int] = get_module_path(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[Any] = importlib.import_module(SCREAMING_SNAKE_CASE )
return test_module
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ : Optional[int] = []
SCREAMING_SNAKE_CASE_ : Tuple = get_test_module(SCREAMING_SNAKE_CASE )
for attr in dir(SCREAMING_SNAKE_CASE ):
if attr.endswith('ModelTester' ):
tester_classes.append(getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
# sort with class names
return sorted(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : x.__name__ )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ) -> Any:
SCREAMING_SNAKE_CASE_ : Optional[int] = []
SCREAMING_SNAKE_CASE_ : str = get_test_module(SCREAMING_SNAKE_CASE )
for attr in dir(SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ : List[str] = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
SCREAMING_SNAKE_CASE_ : Optional[Any] = getattr(SCREAMING_SNAKE_CASE , 'all_model_classes' , [] )
if len(SCREAMING_SNAKE_CASE ) > 0:
test_classes.append(SCREAMING_SNAKE_CASE )
# sort with class names
return sorted(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : x.__name__ )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ : Optional[int] = get_test_classes(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[str] = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
return sorted(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : x.__name__ )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ) -> Dict:
SCREAMING_SNAKE_CASE_ : List[Any] = test_class()
if hasattr(SCREAMING_SNAKE_CASE , 'setUp' ):
test.setUp()
SCREAMING_SNAKE_CASE_ : List[str] = None
if hasattr(SCREAMING_SNAKE_CASE , 'model_tester' ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = test.model_tester.__class__
return model_tester
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ : Any = get_test_classes(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : int = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(SCREAMING_SNAKE_CASE )
# sort with class names
return sorted(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : x.__name__ )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ : List[str] = get_test_classes_for_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Any = []
for test_class in test_classes:
SCREAMING_SNAKE_CASE_ : Tuple = get_model_tester_from_test_class(SCREAMING_SNAKE_CASE )
if tester_class is not None:
tester_classes.append(SCREAMING_SNAKE_CASE )
# sort with class names
return sorted(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : x.__name__ )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ) -> str:
SCREAMING_SNAKE_CASE_ : str = get_test_classes(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[str] = {test_class: get_model_tester_from_test_class(SCREAMING_SNAKE_CASE ) for test_class in test_classes}
return test_tester_mapping
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ) -> Dict:
SCREAMING_SNAKE_CASE_ : Any = get_model_classes(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Optional[Any] = {
model_class: get_test_classes_for_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for model_class in model_classes
}
return model_test_mapping
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ) -> int:
SCREAMING_SNAKE_CASE_ : str = get_model_classes(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[Any] = {
model_class: get_tester_classes_for_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for model_class in model_classes
}
return model_to_tester_mapping
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ) -> Any:
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return o
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return o.__name__
elif isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ):
return [to_json(SCREAMING_SNAKE_CASE ) for x in o]
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return {to_json(SCREAMING_SNAKE_CASE ): to_json(SCREAMING_SNAKE_CASE ) for k, v in o.items()}
else:
return o
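# Example usage (added for illustration; not part of the original module). It
# assumes you run from the root of a `transformers` checkout so that the test
# modules under `tests/models/...` are importable; the BERT test file path is
# just a plausible target, not something this module hard-codes.
if __name__ == "__main__":
    bert_test_file = "tests/models/bert/test_modeling_bert.py"
    print(to_json(get_test_to_tester_mapping(bert_test_file)))
    print(to_json(get_model_to_test_mapping(bert_test_file)))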
| 345 |
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"])

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]

        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)

        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
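# Outside the test harness, the `move_added_token` behaviour checked above can
# be reproduced roughly like this (a sketch, not part of the original tests;
# it downloads the `transfo-xl-wt103` checkpoint, and the printed id assumes
# the move succeeded):
def _demo_move_added_token():
    tokenizer = TransfoXLTokenizer.from_pretrained("transfo-xl-wt103")
    tokenizer.add_tokens(["new1"])
    tokenizer.move_added_token("new1", 1)
    print(tokenizer.encode("new1"))  # expected: [1], mirroring the assertion above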
| 345 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
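# Illustrative end-to-end call (added for clarity, not part of the original
# tests): build a processor from the same tiny components the tests use and
# run one text+image pair through it.
def _demo_blip_processor():
    processor = BlipProcessor(
        BlipImageProcessor(),
        BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel"),
    )
    image = Image.fromarray(np.zeros((30, 400, 3), dtype=np.uint8))
    inputs = processor(text="lower newer", images=image, return_tensors="np")
    print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']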
| 709 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
logger = logging.get_logger(__name__)


class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs) | 178 | 0 |
"""simple docstring"""
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
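# A healthy single-node, 2-GPU run is expected to print something along these
# lines (illustrative; exact hostnames and version numbers depend on your setup):
#
#   [node-0] is OK (global rank: 0/2)
#   [node-1] is OK (global rank: 1/2)
#   pt=2.0.1, cuda=11.8, nccl=(2, 14, 3)
#
# If one of the ranks never prints its "is OK" line, that rank is the one to debug.
#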
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """solves multi-process interleaved print problem"""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)


local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = f"[{hostname}-{local_rank}]"

try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")

    dist.barrier()

    if rank == 0:
        printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")

except Exception:
    printflock(f"{gpu} is broken")
    raise | 52 |
"""simple docstring"""
TEXT_TO_IMAGE_PARAMS = frozenset(
[
"prompt",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])
TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])
IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(["image"])
IMAGE_VARIATION_PARAMS = frozenset(
[
"image",
"height",
"width",
"guidance_scale",
]
)
IMAGE_VARIATION_BATCH_PARAMS = frozenset(["image"])
TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
[
"prompt",
"image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(["prompt", "image", "negative_prompt"])
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
[
# Text guided image variation with an image mask
"prompt",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["prompt", "image", "mask_image", "negative_prompt"])
IMAGE_INPAINTING_PARAMS = frozenset(
[
# image variation with an image mask
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["image", "mask_image"])
IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
[
"example_image",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["example_image", "image", "mask_image"])
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(["class_labels"])
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(["class_labels"])
UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(["batch_size"])
UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([])
UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(["batch_size"])
UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([])
TEXT_TO_AUDIO_PARAMS = frozenset(
[
"prompt",
"audio_length_in_s",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])
TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(["input_tokens"])
TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(["input_tokens"])
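# Quick self-check (added for illustration; not in the original module): the
# batch-parameter sets are, by construction, the subset of call arguments that
# can be batched, so each must be contained in the full parameter set for its
# task. Running this file directly verifies that invariant.
if __name__ == "__main__":
    assert TEXT_TO_IMAGE_BATCH_PARAMS <= TEXT_TO_IMAGE_PARAMS
    assert TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS <= TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    assert TEXT_TO_AUDIO_BATCH_PARAMS <= TEXT_TO_AUDIO_PARAMS
    print("pipeline parameter sets are internally consistent")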
| 602 | 0 |
import argparse
import torch
from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
MAPPING_SPEECH_ENCODER_PRENET = {
'''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''',
'''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''',
'''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''',
'''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''',
}
MAPPING_TEXT_ENCODER_PRENET = {
'''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''',
'''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''',
}
MAPPING_SPEECH_DECODER_PRENET = {
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''',
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''',
'''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''',
'''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''',
'''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''',
}
MAPPING_SPEECH_DECODER_POSTNET = {
'''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''',
'''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''',
'''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''',
'''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''',
'''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''',
'''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''',
'''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''',
'''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''',
}
MAPPING_TEXT_DECODER_PRENET = {
'''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''',
}
MAPPING_TEXT_DECODER_POSTNET = {
'''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''',
}
MAPPING_ENCODER = {
'''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''',
'''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''',
'''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''',
'''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''',
'''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''',
'''encoder.layers.*.fc1''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''',
'''encoder.layers.*.fc2''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''',
'''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''',
'''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''',
}
MAPPING_DECODER = {
'''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''',
'''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''',
'''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''',
'''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''',
'''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''',
'''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''',
'''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''',
'''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''',
'''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''',
'''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''',
'''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''',
'''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''',
'''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''',
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
'''encoder.version''',
'''encoder.layers.*.norm_k.weight''',
'''encoder.layers.*.norm_k.bias''',
'''decoder.version''',
'''decoder.layers.*.norm_k.weight''',
'''decoder.layers.*.norm_k.bias''',
'''decoder.pos_emb.pe_k''',
'''speech_encoder_prenet.embed_positions._float_tensor''',
'''text_decoder_prenet.embed_positions._float_tensor''',
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''speech_decoder_prenet.*''',
'''speech_decoder_postnet.*''',
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
'''encoder.proj''',
'''speech_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
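# Small self-check (added for illustration; not in the original converter) of
# the three matching modes `should_ignore` supports: trailing wildcard, infix
# wildcard, and plain substring.
def _should_ignore_examples():
    assert should_ignore("text_decoder_prenet.embed_tokens.weight", ["text_decoder_prenet.*"])
    assert should_ignore("encoder.layers.0.norm_k.weight", ["encoder.layers.*.norm_k.weight"])
    assert should_ignore("encoder.version", ["encoder.version"])
    assert not should_ignore("encoder.layers.0.fc1.weight", IGNORE_KEYS)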
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speecht5_checkpoint(
    task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None
):
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--task''',
default='''s2t''',
type=str,
help='''Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
    args = parser.parse_args()
    convert_speecht5_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
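# Example invocation (illustrative; the script file name and the
# checkpoint/vocab paths below are placeholders you must replace with real
# fairseq SpeechT5 artifacts):
#
#   python convert_speecht5_checkpoint.py \
#       --task t2s \
#       --checkpoint_path ./speecht5_tts.pt \
#       --vocab_path ./spm_char.model \
#       --pytorch_dump_folder_path ./speecht5_tts_converted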
| 713 | # Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    """Also known as the logistic function."""
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    """Binary cross-entropy loss."""
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix, y is the target matrix
def logistic_reg(alpha, x, y, max_iterations=70_000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70_000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

def predict_prob(x):
    return sigmoid_function(
        np.dot(x, theta)
    )  # predicting the value of probability from the logistic regression algorithm

plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
(x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
(x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
(xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
grid = np.c_[xx1.ravel(), xx2.ravel()]
probs = predict_prob(grid).reshape(xx1.shape)
plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
plt.legend()
plt.show()
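# Optional cross-check (added for illustration; not in the original script):
# scikit-learn's LogisticRegression should recover a similar decision boundary
# on the same two features. The from-scratch model above has no intercept term,
# so `fit_intercept=False` plus a large `C` (weak regularization) makes the
# comparison closer, though not exact.
from sklearn.linear_model import LogisticRegression

clf = LogisticRegression(C=1e5, fit_intercept=False).fit(x, y)
print("sklearn theta:", clf.coef_.ravel())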
| 379 | 0 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    """
    A single training/test example for token classification.

    Args:
        guid: Unique id for the example.
        words: list. The words of the sequence.
        labels: (Optional) list. The labels for each word of the sequence. This should be
        specified for train and dev examples, but not for test examples.
    """

    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    """
    A single set of features of data.
    Property names are the same names as the corresponding inputs to a model.
    """

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError
@staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]

            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features
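# Hedged, minimal illustration (not part of the original module) of the
# feature conversion above: one toy example converted with a small pretrained
# tokenizer. Note this downloads `bert-base-cased` on first use.
def _demo_convert_examples_to_features():
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    example = InputExample(guid="demo-1", words=["Hugging", "Face"], labels=["B-ORG", "I-ORG"])
    features = TokenClassificationTask.convert_examples_to_features(
        [example], ["O", "B-ORG", "I-ORG"], 16, tokenizer, pad_token=tokenizer.pad_token_id
    )
    print(features[0].input_ids)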
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        # Use the cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir, "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length))
            )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples,
                        labels,
                        max_seq_length,
                        tokenizer,
                        cls_token_at_end=bool(model_type in ["xlnet"]),
                        cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                        sep_token=tokenizer.sep_token,
                        sep_token_extra=False,
                        pad_on_left=bool(tokenizer.padding_side == "left"),
                        pad_token=tokenizer.pad_token_id,
                        pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -100
        # Use the cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples,
                labels,
                max_seq_length,
                tokenizer,
                cls_token_at_end=bool(model_type in ["xlnet"]),
                cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                sep_token=tokenizer.sep_token,
                sep_token_extra=False,
                pad_on_left=bool(tokenizer.padding_side == "left"),
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,
                pad_token_label_id=self.pad_token_label_id,
            )

            def gen():
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                    (
                        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),
                )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
                    (
                        {
                            "input_ids": tf.TensorShape([None]),
                            "attention_mask": tf.TensorShape([None]),
                            "token_type_ids": tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),
                )

        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))

            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
| 312 | # this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
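# Example (illustrative, not part of the script): from the repo root,
#
#   python ./utils/get_modified_files.py src tests
#
# prints the space-separated modified .py files under `src` and `tests` since
# the fork point with `main`, without a trailing newline.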
| 312 | 1 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json',
},
'spm_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_config_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/m2m100_418M': 10_24,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
'm2m100': ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'],
'wmt21': ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de']
}
class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        src_lang=None,
        tgt_lang=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        language_codes="m2m100",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        num_madeup_words=8,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            language_codes=language_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            num_madeup_words=num_madeup_words,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words

    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) in a token (str) using the decoder."""
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source lang setting: prefix=[src_lang_code] and suffix=[eos]."""
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target lang setting: prefix=[tgt_lang_code] and suffix=[eos]."""
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]


def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
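# Illustrative translation round-trip (not part of this module; it downloads
# the 418M checkpoint, so treat it as a sketch rather than a test):
def _demo_m2m100_translation():
    from transformers import M2M100ForConditionalGeneration

    tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en")
    model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
    inputs = tokenizer("Hello, how are you?", return_tensors="pt")
    generated = model.generate(**inputs, forced_bos_token_id=tokenizer.get_lang_id("fr"))
    print(tokenizer.batch_decode(generated, skip_special_tokens=True))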
| 706 |
'''simple docstring'''
import torch
from diffusers import StableDiffusionPipeline
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("dog-bucket.png")
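# For reproducible samples, a seeded generator can be passed as well (an
# optional variation on the snippet above, not required by it):
generator = torch.Generator("cuda").manual_seed(0)
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5, generator=generator).images[0]
image.save("dog-bucket-seeded.png")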
| 521 | 0 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
'EAGER',
'AOT_EAGER',
'INDUCTOR',
'NVFUSER',
'AOT_NVFUSER',
'AOT_CUDAGRAPHS',
'OFI',
'FX2TRT',
'ONNXRT',
'IPEX',
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(['LOCAL_MACHINE', 'AMAZON_SAGEMAKER'][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(['NO', 'MULTI_CPU', 'MULTI_XPU', 'MULTI_GPU', 'MULTI_NPU', 'TPU'][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(['no', 'fp16', 'bf16', 'fp8'][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(['NO', 'DATA_PARALLEL', 'MODEL_PARALLEL'][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]
class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    def _format_usage(self, usage, actions, groups, prefix):
        '''simple docstring'''
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace('<command> [<args>] ', '')
        return usage
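
# Illustrative use (assumed): pass the formatter when building a parser so the
# auto-generated usage string drops the internal "<command> [<args>]" placeholder.
#   parser = argparse.ArgumentParser("accelerate", formatter_class=SubcommandHelpFormatter)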
from typing import List
from .keymap import KEYMAP, get_character
def mark(key):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator
class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
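
# Usage sketch (illustrative, assuming the reconstructed names above): decorate a
# handler with @mark and build the class through register so handle_input dispatches
# the pressed key to it.
#
#   @register
#   class Menu:
#       @mark(ord("q"))
#       def quit(cls):
#           return "quit"
#
#   Menu.handle_input()  # returns "quit" when the user presses "q"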
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = "."

# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
"Assert",
"AssignVariableOp",
"EmptyTensorList",
"MergeV2Checkpoints",
"ReadVariableOp",
"ResourceGather",
"RestoreV2",
"SaveV2",
"ShardedFilename",
"StatefulPartitionedCall",
"StaticRegexFullMatch",
"VarHandleOp",
]
def onnx_compliancy(saved_model_path, strict, opset):
    """simple docstring"""
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()
    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []
    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
parser.add_argument(
"--opset", default=1_2, type=int, help="The ONNX opset against which the model has to be tested."
)
parser.add_argument(
"--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
)
parser.add_argument(
"--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
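
# Example invocation (illustrative; script and model paths are placeholders):
#   python utils/check_tf_ops.py --saved_model_path saved_model/saved_model.pb --opset 12 --strict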
import math
def check_partition_perfect(positive_integer: int) -> bool:
    """simple docstring"""
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    """simple docstring"""
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1
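
# Quick sanity checks (illustrative, not part of the original file): k = 2**n * (2**n - 1)
# makes sqrt(4*k + 1) / 2 + 1 / 2 an exact power of two, so those k pass the test.
assert check_partition_perfect(2)  # n = 1
assert check_partition_perfect(12)  # n = 2
assert not check_partition_perfect(3)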
if __name__ == "__main__":
print(F'''{solution() = }''')
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class PegasusConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
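
# Illustrative check (not in the original file): the attribute_map above aliases
# `hidden_size` to `d_model`, so both report the same value on a default config.
#   config = PegasusConfig()
#   assert config.hidden_size == config.d_model == 1024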
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}


class ImageEncoder(nn.Module):
    """simple docstring"""

    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, input_modal):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(input_modal))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    """simple docstring"""

    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length
        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]
        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1
        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)
        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
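
# Illustrative wiring (not in the original file): the collate function plugs straight
# into a PyTorch DataLoader built on top of JsonlDataset.
#   from torch.utils.data import DataLoader
#   loader = DataLoader(dataset, batch_size=8, collate_fn=collate_fn)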
def get_mmimdb_labels():
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
    return transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017],
                std=[0.12221994, 0.12145835, 0.14380469],
            ),
        ]
    )
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask)
    def get_config(self):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
__magic_name__ = {
'''A''': ['''B''', '''C''', '''D'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F'''],
'''D''': ['''B''', '''D'''],
'''E''': ['''B''', '''F'''],
'''F''': ['''C''', '''E''', '''G'''],
'''G''': ['''F'''],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, '''A'''))
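    # Illustrative check (not in the original file): the example graph is connected,
    # so a search from "A" visits every vertex.
    assert depth_first_search(G, 'A') == set(G)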
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    pass


class UnexpectedDownloadedFile(ChecksumVerificationException):
    pass


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    pass


class NonMatchingChecksumError(ChecksumVerificationException):
    pass
def verify_checksums(expected_checksums, recorded_checksums, verification_name=None):
    """simple docstring"""
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)
class SplitsVerificationException(Exception):
    pass


class UnexpectedSplits(SplitsVerificationException):
    pass


class ExpectedMoreSplits(SplitsVerificationException):
    pass


class NonMatchingSplitsSizesError(SplitsVerificationException):
    pass
def verify_splits(expected_splits, recorded_splits):
    """simple docstring"""
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")
def get_size_checksum_dict(path, record_checksum: bool = True) -> dict:
    """simple docstring"""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size):
    """simple docstring"""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
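
# Illustrative usage sketch (URL and file name are placeholders, not from the source):
#   recorded = {"https://host/data.csv": get_size_checksum_dict("data.csv")}
#   expected = {"https://host/data.csv": {"num_bytes": 1024, "checksum": "..."}}
#   verify_checksums(expected, recorded)  # raises NonMatchingChecksumError on mismatch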
"""simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    """simple docstring"""

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append('on_init_end')

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append('on_train_begin')

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append('on_train_end')

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append('on_epoch_begin')

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append('on_epoch_end')

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append('on_step_begin')

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append('on_step_end')

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append('on_evaluate')

    def on_predict(self, args, state, control, **kwargs):
        self.events.append('on_predict')

    def on_save(self, args, state, control, **kwargs):
        self.events.append('on_save')

    def on_log(self, args, state, control, **kwargs):
        self.events.append('on_log')

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append('on_prediction_step')
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    """simple docstring"""

    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)
        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, callbacks=callbacks)
    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))
        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)
    def get_expected_events(self, trainer):
        expected_events = ['on_init_end', 'on_train_begin']
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ['on_prediction_step'] * len(trainer.get_eval_dataloader()) + ['on_log', 'on_evaluate']
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append('on_epoch_begin')
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append('on_log')
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append('on_save')
            expected_events.append('on_epoch_end')
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(cb)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, cb1)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action='ignore', category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy='steps')
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy='epoch')
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy='steps',
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch('transformers.trainer_callback.logger.warning') as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
):
    '''simple docstring'''
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f'''Embedding dimension {embedding_dim} should be even'''
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal
class FlaxTimestepEmbedding(nn.Module):
    """simple docstring"""

    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name='''linear_1''')(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name='''linear_2''')(temb)
        return temb
class FlaxTimesteps(nn.Module):
    """simple docstring"""

    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
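
# Quick shape check (illustrative, not part of the original module):
#   emb = get_sinusoidal_embeddings(jnp.arange(4), embedding_dim=32)
#   assert emb.shape == (4, 32)  # one sin/cos feature row per timestep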
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('''socket.socket''' )
@patch('''builtins.open''' )
def test_send_file_running_as_expected(file, sock):
    '''simple docstring'''
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename='mytext.txt', testing=True)
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
),
"distilbert-base-uncased-finetuned-sst-2-english": (
"https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
),
}
class DistilBertConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    '''simple docstring'''

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ]
        )
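
# Illustrative usage (mirrors the standard transformers.onnx export flow):
#   config = DistilBertConfig()
#   onnx_config = DistilBertOnnxConfig(config)
#   print(onnx_config.inputs)  # OrderedDict with dynamic batch/sequence axes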
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"edbeeching/decision-transformer-gym-hopper-medium": (
"https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(''',''') if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError('''You must include at least one label and at least one sequence.''')
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    '''The provided hypothesis_template "{}" was not able to be formatted with the target labels. '''
                    '''Make sure the passed template includes formatting syntax such as {{}} where the label should go.'''
                ).format(hypothesis_template)
            )
        if isinstance(sequences, str):
            sequences = [sequences]
        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])
        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                '''Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to '''
                '''-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.'''
            )

    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith('''entail'''):
                return ind
        return -1
    def _parse_and_tokenize(
        self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
    ):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                '''Tokenizer was not supporting padding necessary for zero-shot, attempting to use '''
                ''' `pad_token=eos_token`'''
            )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs,
                add_special_tokens=add_special_tokens,
                return_tensors=return_tensors,
                padding=padding,
                truncation=truncation,
            )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs,
                    add_special_tokens=add_special_tokens,
                    return_tensors=return_tensors,
                    padding=padding,
                    truncation=TruncationStrategy.DO_NOT_TRUNCATE,
                )
            else:
                raise e

        return inputs
    def _sanitize_parameters(self, **kwargs):
        if kwargs.get('''multi_class''', None) is not None:
            kwargs["multi_label"] = kwargs['''multi_class''']
            logger.warning(
                '''The `multi_class` argument has been deprecated and renamed to `multi_label`. '''
                '''`multi_class` will be removed in a future version of Transformers.'''
            )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs['''candidate_labels'''])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs['''hypothesis_template''']

        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs['''multi_label''']
        return preprocess_params, {}, postprocess_params
    def __call__(self, sequences, *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}")
        return super().__call__(sequences, **kwargs)
    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)
        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])
            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }
    def _forward(self, inputs):
        candidate_label = inputs['''candidate_label''']
        sequence = inputs['''sequence''']
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)
        model_outputs = {
            '''candidate_label''': candidate_label,
            '''sequence''': sequence,
            '''is_last''': inputs['''is_last'''],
            **outputs,
        }
        return model_outputs
    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs['''candidate_label'''] for outputs in model_outputs]
        sequences = [outputs['''sequence'''] for outputs in model_outputs]
        logits = np.concatenate([output['''logits'''].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))

        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)

        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
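
# Illustrative usage via the standard pipeline factory (model choice left to the caller):
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-classification")
#   classifier("I love this movie", candidate_labels=["positive", "negative"])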
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    """simple docstring"""

    def __init__(self, data):
        self.data = data
        self.next = None


class CircularLinkedList:
    """simple docstring"""

    def __init__(self):
        self.head = None
        self.tail = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self):
        return "->".join(str(item) for item in iter(self))
    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError('''list index out of range.''')
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self):
        return self.delete_nth(0)

    def delete_tail(self):
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError('''list index out of range.''')
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0
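
# Minimal usage sketch (illustrative):
#   cll = CircularLinkedList()
#   cll.insert_tail("a"); cll.insert_tail("b"); cll.insert_head("start")
#   print(cll)  # start->a->b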
def test_circular_linked_list() -> None:
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp: int) -> bool:
'''simple docstring'''
if (
(cp >= 0X4_e00 and cp <= 0X9_fff)
or (cp >= 0X3_400 and cp <= 0X4_dbf) #
or (cp >= 0X20_000 and cp <= 0X2a_6df) #
or (cp >= 0X2a_700 and cp <= 0X2b_73f) #
or (cp >= 0X2b_740 and cp <= 0X2b_81f) #
or (cp >= 0X2b_820 and cp <= 0X2c_eaf) #
or (cp >= 0Xf_900 and cp <= 0Xf_aff)
or (cp >= 0X2f_800 and cp <= 0X2f_a1f) #
): #
return True
return False
def is_chinese(word: str):
    '''simple docstring'''
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    '''simple docstring'''
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set()):
    '''simple docstring'''
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    '''simple docstring'''
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    '''simple docstring'''
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
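
# Illustrative note (not in the original script): for a line such as "我爱北京",
# prepare_ref records the positions of "##"-prefixed single-character subwords so a
# whole-word-masking data collator can mask every piece of a Chinese word together.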
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
)
parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")
    args = parser.parse_args()

    main(args)
def lucas_lehmer_test(p: int) -> bool:
    '''simple docstring'''
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
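    # Illustrative extension (not in the original file): exponents that pass the test
    # correspond to Mersenne primes 2**p - 1; e.g. 2047 = 23 * 89, so p = 11 fails.
    print([p for p in range(3, 32, 2) if lucas_lehmer_test(p)])  # [3, 5, 7, 13, 17, 19, 31]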
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 2048-bit
14: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AACAA68FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 3072-bit
15: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 4096-bit
16: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'''
+ '''FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 6144-bit
17: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'''
+ '''8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'''
+ '''302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'''
+ '''A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'''
+ '''49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'''
+ '''FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'''
+ '''180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'''
+ '''3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'''
+ '''04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'''
+ '''B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'''
+ '''1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'''
+ '''E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'''
+ '''99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'''
+ '''04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'''
+ '''233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'''
+ '''D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'''
+ '''AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'''
+ '''DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'''
+ '''2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'''
+ '''F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'''
+ '''BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'''
+ '''B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'''
+ '''387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'''
+ '''6DCC4024FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 8192-bit
18: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'''
+ '''F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'''
+ '''179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'''
+ '''DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'''
+ '''5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'''
+ '''D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'''
+ '''23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'''
+ '''06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'''
+ '''DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'''
+ '''12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'''
+ '''38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'''
+ '''741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'''
+ '''3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'''
+ '''22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'''
+ '''4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'''
+ '''062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'''
+ '''4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'''
+ '''B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'''
+ '''4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'''
+ '''9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'''
+ '''60C980DD98EDD3DFFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
}
class DiffieHellman:
    def __init__(self, group: int = 14) -> None:
        """simple docstring"""
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]
        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        """simple docstring"""
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        """simple docstring"""
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        """simple docstring"""
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        """simple docstring"""
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key: int, prime: int) -> bool:
        """simple docstring"""
        return (
            2 <= remote_public_key <= prime - 2
            and pow(remote_public_key, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        """simple docstring"""
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
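# Illustrative round trip (a sketch; variable names assumed):
#   alice, bob = DiffieHellman(group=14), DiffieHellman(group=14)
#   shared_a = alice.generate_shared_key(bob.generate_public_key())
#   shared_b = bob.generate_shared_key(alice.generate_public_key())
#   assert shared_a == shared_b  # both sides derive the same SHA-256 digest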
if __name__ == "__main__":
import doctest
doctest.testmod()
| 154 |
def solution(n: int = 600851475143) -> int:
    """simple docstring"""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
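# Worked example: 600851475143 = 71 * 839 * 1471 * 6857, so the loop strips the
# factors in increasing order and solution() returns 6857.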
if __name__ == "__main__":
print(f"{solution() = }")
| 154 | 1 |
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class lowerCamelCase__ ( unittest.TestCase ):
    def test_debug_launcher_script(self):
        '''simple docstring'''
        debug_launcher(test_script.main)

    def test_debug_launcher_ops(self):
        '''simple docstring'''
        debug_launcher(test_ops.main)
| 702 |
"""simple docstring"""
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
    def setUp(self):
        '''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""test""",
"""question""",
"""this""",
"""is""",
"""the""",
"""first""",
"""second""",
"""third""",
"""fourth""",
"""fifth""",
"""record""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)
    def get_tokenizer(self):
'''simple docstring'''
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , """realm_tokenizer""" ) )
    def tearDown(self):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
    def get_config(self):
'''simple docstring'''
__lowercase = RealmConfig(num_block_records=self.num_block_records )
return config
    def get_dummy_dataset(self):
'''simple docstring'''
__lowercase = Dataset.from_dict(
{
"""id""": ["""0""", """1"""],
"""question""": ["""foo""", """bar"""],
"""answers""": [["""Foo""", """Bar"""], ["""Bar"""]],
} )
return dataset
    def get_dummy_block_records(self):
'''simple docstring'''
        block_records = np.array(
[
B"""This is the first record""",
B"""This is the second record""",
B"""This is the third record""",
B"""This is the fourth record""",
B"""This is the fifth record""",
B"""This is a longer longer longer record""",
            ], dtype=object)
return block_records
    def get_dummy_retriever(self):
        '''simple docstring'''
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(), tokenizer=self.get_tokenizer()
        )
        return retriever
    def test_retrieve(self):
        '''simple docstring'''
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer
        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"], add_special_tokens=False, return_token_type_ids=False, return_attention_mask=False
        ).input_ids
        max_length = config.reader_seq_len
        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )
        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
self.assertEqual(concat_inputs.input_ids.shape , (2, 1_0) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 1_0) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 1_0) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 1_0) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["""[CLS]""", """test""", """question""", """[SEP]""", """this""", """is""", """the""", """first""", """record""", """[SEP]"""] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["""[CLS]""", """test""", """question""", """[SEP]""", """this""", """is""", """the""", """fourth""", """record""", """[SEP]"""] , )
    def test_block_has_answer(self):
        '''simple docstring'''
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer
        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"], add_special_tokens=False, return_token_type_ids=False, return_attention_mask=False
        ).input_ids
        max_length = config.reader_seq_len
        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )
        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)
    def test_save_load_pretrained(self):
        '''simple docstring'''
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")
            self.assertEqual(retriever.block_records[0], b"This is the first record")
| 442 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(
    PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """simple docstring"""

    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        '''simple docstring'''
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=5002)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        '''simple docstring'''
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        '''simple docstring'''
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        '''simple docstring'''
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
    def test_alt_diffusion_ddim(self):
        '''simple docstring'''
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, vocab_size=5002)
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_alt_diffusion_pndm(self):
        '''simple docstring'''
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, vocab_size=5002)
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
"""simple docstring"""
    def tearDown(self):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_alt_diffusion(self):
        '''simple docstring'''
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_alt_diffusion_fast_ddim(self):
        '''simple docstring'''
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 551 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
__UpperCamelCase = logging.getLogger()
__UpperCamelCase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests(TestCasePlus):
    """simple docstring"""

    def _create_dummy_data(self, data_dir):
        '''simple docstring'''
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)
    def _run_finetune(self, gpus, distributed_retriever="pytorch"):
        '''simple docstring'''
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)
        testargs = f'''
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
'''.split()
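        # Note: the triple-quoted f-string above is split on whitespace, so each
        # "--flag value" pair in it becomes two consecutive argv entries in testargs.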
if gpus > 0:
testargs.append(F'''--gpus={gpus}''' )
if is_apex_available():
testargs.append("--fp16" )
else:
testargs.append("--gpus=0" )
testargs.append("--distributed_backend=ddp_cpu" )
testargs.append("--num_processes=2" )
        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())
        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result
@require_torch_gpu
    def test_finetune_gpu(self):
        '''simple docstring'''
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
@require_torch_multi_gpu
    def test_finetune_multigpu(self):
        '''simple docstring'''
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
@require_torch_gpu
@require_ray
    def test_finetune_gpu_ray_retrieve(self):
        '''simple docstring'''
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
@require_torch_multi_gpu
@require_ray
    def test_finetune_multigpu_ray_retrieve(self):
        '''simple docstring'''
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
| 551 | 1 |
def actual_power(a: int, b: int) -> int:
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    if b < 0:
        # delegate only non-negative exponents to actual_power
        return 1 / actual_power(a, -b)
    return actual_power(a, b)
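# Note: actual_power recomputes the half-exponent twice, so it performs O(b)
# multiplications overall. A minimal O(log b) sketch (name `fast_power`
# assumed) that squares the half instead, for non-negative integer exponents:
#   def fast_power(a, b):
#       if b == 0:
#           return 1
#       half = fast_power(a, b // 2)
#       return half * half if b % 2 == 0 else a * half * half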
if __name__ == "__main__":
print(power(-2, -3))
| 703 | from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
from .vae_flax import FlaxAutoencoderKL
| 584 | 0 |
'''simple docstring'''
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    """simple docstring"""
    offline_runners = []
    cmd = (
        f"curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)
    runners = status["runners"]
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
                offline_runners.append(runner)
# save the result so we can report them on Slack
with open("""offline_runners.txt""" , """w""" ) as fp:
        fp.write(json.dumps(offline_runners))
    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
raise ValueError(f"""The following runners are offline:\n{failed}""" )
if __name__ == "__main__":
    def list_str(values):
        """simple docstring"""
        return values.split(",")

    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--target_runners',
default=None,
type=list_str,
required=True,
help='Comma-separated list of runners to check status.',
)
parser.add_argument(
'--token', default=None, type=str, required=True, help='A token that has actions:read permission.'
)
    args = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 533 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.linear_k': 'encoder.layers.*.self_attn.linear_k',
'self_attn.linear_v': 'encoder.layers.*.self_attn.linear_v',
'self_attn.linear_q': 'encoder.layers.*.self_attn.linear_q',
'self_attn.pos_bias_u': 'encoder.layers.*.self_attn.pos_bias_u',
'self_attn.pos_bias_v': 'encoder.layers.*.self_attn.pos_bias_v',
'self_attn.linear_out': 'encoder.layers.*.self_attn.linear_out',
'self_attn.linear_pos': 'encoder.layers.*.self_attn.linear_pos',
'self_attn.rotary_emb': 'encoder.embed_positions',
'self_attn_layer_norm': 'encoder.layers.*.self_attn_layer_norm',
'conv_module.pointwise_conv1': 'encoder.layers.*.conv_module.pointwise_conv1',
'conv_module.pointwise_conv2': 'encoder.layers.*.conv_module.pointwise_conv2',
'conv_module.depthwise_conv': 'encoder.layers.*.conv_module.depthwise_conv',
'conv_module.batch_norm': 'encoder.layers.*.conv_module.batch_norm',
'conv_module.layer_norm': 'encoder.layers.*.conv_module.layer_norm',
'ffn1.w_1': 'encoder.layers.*.ffn1.intermediate_dense',
'ffn1.w_2': 'encoder.layers.*.ffn1.output_dense',
'ffn1.layer_norm': 'encoder.layers.*.ffn1_layer_norm',
'ffn2.w_1': 'encoder.layers.*.ffn2.intermediate_dense',
'ffn2.w_2': 'encoder.layers.*.ffn2.output_dense',
'ffn2.layer_norm': 'encoder.layers.*.ffn2_layer_norm',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
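# Keys listed in TOP_LEVEL_KEYS live at the top level of the HF model, so the
# renaming loop in recursively_load_weights skips the "wav2vec2_conformer."
# prefix for them.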
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """simple docstring"""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
_SCREAMING_SNAKE_CASE : int = value
elif weight_type == "weight_g":
_SCREAMING_SNAKE_CASE : List[str] = value
elif weight_type == "weight_v":
_SCREAMING_SNAKE_CASE : Union[str, Any] = value
elif weight_type == "bias":
_SCREAMING_SNAKE_CASE : Tuple = value
elif weight_type == "running_mean":
_SCREAMING_SNAKE_CASE : Tuple = value
elif weight_type == "running_var":
_SCREAMING_SNAKE_CASE : Optional[Any] = value
elif weight_type == "num_batches_tracked":
_SCREAMING_SNAKE_CASE : Any = value
elif weight_type == "inv_freq":
_SCREAMING_SNAKE_CASE : List[str] = value
else:
_SCREAMING_SNAKE_CASE : List[Any] = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    """simple docstring"""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group")
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """simple docstring"""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """simple docstring"""
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()
    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False)
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask)
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])})
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec, not is_finetuned)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 533 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_bertweet": ["BertweetTokenizer"]}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 702 |
"""simple docstring"""
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
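# atom14 is the compact per-residue atom layout (at most 14 heavy atoms per
# residue type), while atom37 indexes a fixed table of all 37 possible heavy
# atom types. The functions below build the index maps and existence masks
# used to convert between the two representations.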
def make_atom14_masks(protein):
    restype_atom14_to_atom37 = []
    restype_atom37_to_atom14 = []
    restype_atom14_mask = []
    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types])
        restype_atom14_mask.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37.append([0] * 14)
    restype_atom37_to_atom14.append([0] * 37)
    restype_atom14_mask.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37, dtype=torch.int32, device=protein["aatype"].device)
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14, dtype=torch.int32, device=protein["aatype"].device)
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask, dtype=torch.float32, device=protein["aatype"].device)
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]
    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask
    return protein
def make_atom14_masks_np(batch):
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
| 36 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
a__ : List[Any] = logging.get_logger(__name__)
class BlipImageProcessor(BaseImageProcessor):
    '''simple docstring'''

    model_input_names = ["pixel_values"]
    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size, default_to_square=True)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=True)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, do_convert_rgb: bool = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=True)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        return encoded_outputs
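# Illustrative usage (a sketch; `pil_image` is assumed to be a PIL.Image):
#   processor = BlipImageProcessor()
#   batch = processor(images=pil_image, return_tensors="pt")
#   batch["pixel_values"].shape  # -> (1, 3, 384, 384) with the defaults above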
| 51 |
"""simple docstring"""
import os
def solution():
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
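# num.txt is the Project Euler problem 13 input: one hundred 50-digit numbers,
# one per line; the answer is the first ten digits of their sum.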
if __name__ == "__main__":
print(solution())
| 102 | 0 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range)

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_graph_mode_with_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
def __UpperCamelCase ( self : Tuple ) -> str:
A = TFViTModel.from_pretrained('google/vit-base-patch16-224' )
self.assertIsNotNone(__UpperCamelCase )
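# Helper that loads the COCO fixture image used by the integration tests below.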
def lowerCamelCase_ ( ) -> Tuple:
'''simple docstring'''
A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self : str ) -> Optional[Any]:
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None
@slow
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
A = TFViTForImageClassification.from_pretrained('google/vit-base-patch16-224' )
A = self.default_image_processor
A = prepare_img()
A = image_processor(images=__UpperCamelCase , return_tensors='tf' )
# forward pass
A = model(**__UpperCamelCase )
# verify the logits
A = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
A = tf.constant([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6] )
tf.debugging.assert_near(outputs.logits[0, :3] , __UpperCamelCase , atol=1e-4 ) | 224 |
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
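# Tester that builds a small BeitConfig plus random pixel inputs/labels for the unit tests below.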
class lowerCAmelCase__ :
def __init__( self : int , __UpperCamelCase : int , __UpperCamelCase : Union[str, Any]=100 , __UpperCamelCase : Optional[int]=13 , __UpperCamelCase : Optional[Any]=30 , __UpperCamelCase : Any=2 , __UpperCamelCase : List[str]=3 , __UpperCamelCase : Tuple=True , __UpperCamelCase : str=True , __UpperCamelCase : int=32 , __UpperCamelCase : int=4 , __UpperCamelCase : int=4 , __UpperCamelCase : Tuple=37 , __UpperCamelCase : Union[str, Any]="gelu" , __UpperCamelCase : Dict=0.1 , __UpperCamelCase : str=0.1 , __UpperCamelCase : Tuple=10 , __UpperCamelCase : Tuple=0.0_2 , __UpperCamelCase : Optional[int]=3 , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : List[str]=[0, 1, 2, 3] , ) -> List[Any]:
A = parent
A = 100
A = batch_size
A = image_size
A = patch_size
A = num_channels
A = is_training
A = use_labels
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = type_sequence_label_size
A = initializer_range
A = scope
A = out_indices
A = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
A = (image_size // patch_size) ** 2
A = num_patches + 1
def __UpperCamelCase ( self : List[Any] ) -> Any:
A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A = None
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
A = self.get_config()
return config, pixel_values, labels, pixel_labels
def __UpperCamelCase ( self : List[Any] ) -> List[Any]:
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def __UpperCamelCase ( self : Any , __UpperCamelCase : Tuple , __UpperCamelCase : int , __UpperCamelCase : str , __UpperCamelCase : str ) -> List[str]:
A = BeitModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
A = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Optional[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Any , __UpperCamelCase : int , __UpperCamelCase : Optional[Any] ) -> Tuple:
A = BeitForMaskedImageModeling(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
A = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def __UpperCamelCase ( self : Dict , __UpperCamelCase : str , __UpperCamelCase : Any , __UpperCamelCase : Any , __UpperCamelCase : Optional[int] ) -> Union[str, Any]:
A = self.type_sequence_label_size
A = BeitForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
A = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A = 1
A = BeitForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __UpperCamelCase ( self : int , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : str , __UpperCamelCase : List[str] ) -> List[Any]:
A = self.num_labels
A = BeitForSemanticSegmentation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
A = model(__UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
A = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def __UpperCamelCase ( self : Dict ) -> List[Any]:
A = self.prepare_config_and_inputs()
A , A , A , A = config_and_inputs
A = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
A_ : int = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
A_ : Union[str, Any] = (
{
'feature-extraction': BeitModel,
'image-classification': BeitForImageClassification,
'image-segmentation': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
A_ : Optional[Any] = False
A_ : str = False
A_ : Any = False
def __UpperCamelCase ( self : str ) -> Optional[int]:
A = BeitModelTester(self )
A = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def __UpperCamelCase ( self : Optional[Any] ) -> List[str]:
self.config_tester.run_common_tests()
@unittest.skip(reason='BEiT does not use inputs_embeds' )
def __UpperCamelCase ( self : List[str] ) -> Any:
pass
@require_torch_multi_gpu
@unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
pass
def __UpperCamelCase ( self : Any ) -> Optional[Any]:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def __UpperCamelCase ( self : Dict ) -> Dict:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(__UpperCamelCase )
A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A = [*signature.parameters.keys()]
A = ['pixel_values']
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def __UpperCamelCase ( self : List[Any] ) -> Dict:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def __UpperCamelCase ( self : List[str] ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCamelCase )
def __UpperCamelCase ( self : Tuple ) -> Tuple:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCamelCase )
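# Training check: a forward/backward pass should run for every trainable model class.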
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
if not self.model_tester.is_training:
return
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(__UpperCamelCase ), BeitForMaskedImageModeling]:
continue
A = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.train()
A = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
A = model(**__UpperCamelCase ).loss
loss.backward()
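# Repeats the training check with gradient checkpointing enabled, where supported.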
def __UpperCamelCase ( self : List[str] ) -> Tuple:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
A = False
A = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(__UpperCamelCase ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
A = model_class(__UpperCamelCase )
model.gradient_checkpointing_enable()
model.to(__UpperCamelCase )
model.train()
A = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
A = model(**__UpperCamelCase ).loss
loss.backward()
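# Under a zero-init config, every non-lambda trainable parameter should start at 0.0 or 1.0.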
def __UpperCamelCase ( self : str ) -> Optional[Any]:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = _config_zero_init(__UpperCamelCase )
for model_class in self.all_model_classes:
A = model_class(config=__UpperCamelCase )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def __UpperCamelCase ( self : Optional[int] ) -> str:
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = BeitModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def lowerCamelCase_ ( ) -> Dict:
'''simple docstring'''
A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def __UpperCamelCase ( self : Dict ) -> Dict:
A = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(__UpperCamelCase )
A = self.default_image_processor
A = prepare_img()
A = image_processor(images=__UpperCamelCase , return_tensors='pt' ).pixel_values.to(__UpperCamelCase )
# prepare bool_masked_pos
A = torch.ones((1, 196) , dtype=torch.bool ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
A = model(pixel_values=__UpperCamelCase , bool_masked_pos=__UpperCamelCase )
A = outputs.logits
# verify the logits
A = torch.Size((1, 196, 8_192) )
self.assertEqual(logits.shape , __UpperCamelCase )
A = torch.tensor(
[[-3.2_4_3_7, 0.5_0_7_2, -1_3.9_1_7_4], [-3.2_4_5_6, 0.4_9_4_8, -1_3.9_4_0_1], [-3.2_0_3_3, 0.5_1_2_1, -1_3.8_5_5_0]] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , __UpperCamelCase , atol=1e-2 ) )
@slow
def __UpperCamelCase ( self : str ) -> Dict:
A = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(__UpperCamelCase )
A = self.default_image_processor
A = prepare_img()
A = image_processor(images=__UpperCamelCase , return_tensors='pt' ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
A = model(**__UpperCamelCase )
A = outputs.logits
# verify the logits
A = torch.Size((1, 1_000) )
self.assertEqual(logits.shape , __UpperCamelCase )
A = torch.tensor([-1.2_3_8_5, -1.0_9_8_7, -1.0_1_0_8] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(logits[0, :3] , __UpperCamelCase , atol=1e-4 ) )
A = 281
self.assertEqual(logits.argmax(-1 ).item() , __UpperCamelCase )
@slow
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
A = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to(
__UpperCamelCase )
A = self.default_image_processor
A = prepare_img()
A = image_processor(images=__UpperCamelCase , return_tensors='pt' ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
A = model(**__UpperCamelCase )
A = outputs.logits
# verify the logits
A = torch.Size((1, 21_841) )
self.assertEqual(logits.shape , __UpperCamelCase )
A = torch.tensor([1.6_8_8_1, -0.2_7_8_7, 0.5_9_0_1] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(logits[0, :3] , __UpperCamelCase , atol=1e-4 ) )
A = 2_396
self.assertEqual(logits.argmax(-1 ).item() , __UpperCamelCase )
@slow
def __UpperCamelCase ( self : Optional[int] ) -> str:
A = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
A = model.to(__UpperCamelCase )
A = BeitImageProcessor(do_resize=__UpperCamelCase , size=640 , do_center_crop=__UpperCamelCase )
A = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
A = Image.open(ds[0]['file'] )
A = image_processor(images=__UpperCamelCase , return_tensors='pt' ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
A = model(**__UpperCamelCase )
A = outputs.logits
# verify the logits
A = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , __UpperCamelCase )
A = version.parse(PIL.__version__ ) < version.parse('9.0.0' )
if is_pillow_less_than_a:
A = torch.tensor(
[
[[-4.9_2_2_5, -2.3_9_5_4, -3.0_5_2_2], [-2.8_8_2_2, -1.0_0_4_6, -1.7_5_6_1], [-2.9_5_4_9, -1.3_2_2_8, -2.1_3_4_7]],
[[-5.8_1_6_8, -3.4_1_2_9, -4.0_7_7_8], [-3.8_6_5_1, -2.2_2_1_4, -3.0_2_7_7], [-3.8_3_5_6, -2.4_6_4_3, -3.3_5_3_5]],
[[-0.0_0_7_8, 3.9_9_5_2, 4.0_7_5_4], [2.9_8_5_6, 4.6_9_4_4, 5.0_0_3_5], [3.2_4_1_3, 4.7_8_1_3, 4.9_9_6_9]],
] , device=__UpperCamelCase , )
else:
A = torch.tensor(
[
[[-4.8_9_6_0, -2.3_6_8_8, -3.0_3_5_5], [-2.8_4_7_8, -0.9_8_3_6, -1.7_4_1_8], [-2.9_4_4_9, -1.3_3_3_2, -2.1_4_5_6]],
[[-5.8_0_8_1, -3.4_1_2_4, -4.1_0_0_6], [-3.8_5_6_1, -2.2_0_8_1, -3.0_3_2_3], [-3.8_3_6_5, -2.4_6_0_1, -3.3_6_6_9]],
[[-0.0_3_0_9, 3.9_8_6_8, 4.0_5_4_0], [2.9_6_4_0, 4.6_8_7_7, 4.9_9_7_6], [3.2_0_8_1, 4.7_6_9_0, 4.9_9_4_2]],
] , device=__UpperCamelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __UpperCamelCase , atol=1e-4 ) )
@slow
def __UpperCamelCase ( self : Optional[int] ) -> Optional[int]:
A = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
A = model.to(__UpperCamelCase )
A = BeitImageProcessor(do_resize=__UpperCamelCase , size=640 , do_center_crop=__UpperCamelCase )
A = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
A = Image.open(ds[0]['file'] )
A = image_processor(images=__UpperCamelCase , return_tensors='pt' ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
A = model(**__UpperCamelCase )
A = outputs.logits.detach().cpu()
A = image_processor.post_process_semantic_segmentation(outputs=__UpperCamelCase , target_sizes=[(500, 300)] )
A = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , __UpperCamelCase )
A = image_processor.post_process_semantic_segmentation(outputs=__UpperCamelCase )
A = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , __UpperCamelCase ) | 224 | 1 |
"""simple docstring"""
import numpy as np
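# Sigmoid via np.exp; note that np.exp can emit an overflow warning for very negative inputs.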
def lowerCamelCase__ ( __snake_case ) -> np.array:
"""simple docstring"""
return 1 / (1 + np.exp(-vector ))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 19 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def lowerCamelCase__ ( __snake_case ) -> Optional[Any]:
"""simple docstring"""
if (
(cp >= 0x4E00 and cp <= 0x9FFF)
or (cp >= 0x3400 and cp <= 0x4DBF) #
or (cp >= 0x2_0000 and cp <= 0x2_A6DF) #
or (cp >= 0x2_A700 and cp <= 0x2_B73F) #
or (cp >= 0x2_B740 and cp <= 0x2_B81F) #
or (cp >= 0x2_B820 and cp <= 0x2_CEAF) #
or (cp >= 0xF900 and cp <= 0xFAFF)
or (cp >= 0x2_F800 and cp <= 0x2_FA1F) #
): #
return True
return False
def lowerCamelCase__ ( __snake_case ) -> Optional[Any]:
"""simple docstring"""
for char in word:
_UpperCamelCase = ord(__snake_case )
if not _is_chinese_char(__snake_case ):
return 0
return 1
def lowerCamelCase__ ( __snake_case ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = set()
for token in tokens:
_UpperCamelCase = len(__snake_case ) > 1 and is_chinese(__snake_case )
if chinese_word:
word_set.add(__snake_case )
_UpperCamelCase = list(__snake_case )
return word_list
def lowerCamelCase__ ( __snake_case, __snake_case ) -> int:
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
_UpperCamelCase = max([len(__snake_case ) for w in chinese_word_set] )
_UpperCamelCase = bert_tokens
_UpperCamelCase , _UpperCamelCase = 0, len(__snake_case )
while start < end:
_UpperCamelCase = True
if is_chinese(bert_word[start] ):
_UpperCamelCase = min(end - start, __snake_case )
for i in range(__snake_case, 1, -1 ):
_UpperCamelCase = ''''''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1, start + i ):
_UpperCamelCase = '''##''' + bert_word[j]
_UpperCamelCase = start + i
_UpperCamelCase = False
break
if single_word:
start += 1
return bert_word
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = []
for i in range(0, len(__snake_case ), 1_00 ):
_UpperCamelCase = ltp_tokenizer.pipeline(lines[i : i + 1_00], tasks=['''cws'''] ).cws
_UpperCamelCase = [get_chinese_word(__snake_case ) for r in res]
ltp_res.extend(__snake_case )
assert len(__snake_case ) == len(__snake_case )
_UpperCamelCase = []
for i in range(0, len(__snake_case ), 1_00 ):
_UpperCamelCase = bert_tokenizer(lines[i : i + 1_00], add_special_tokens=__snake_case, truncation=__snake_case, max_length=5_12 )
bert_res.extend(res['''input_ids'''] )
assert len(__snake_case ) == len(__snake_case )
_UpperCamelCase = []
for input_ids, chinese_word in zip(__snake_case, __snake_case ):
_UpperCamelCase = []
for id in input_ids:
_UpperCamelCase = bert_tokenizer._convert_id_to_token(__snake_case )
input_tokens.append(__snake_case )
_UpperCamelCase = add_sub_symbol(__snake_case, __snake_case )
_UpperCamelCase = []
# We only save the positions of Chinese subwords that start with ##, i.e. tokens that are part of a whole word.
for i, token in enumerate(__snake_case ):
if token[:2] == "##":
_UpperCamelCase = token[2:]
# save chinese tokens' pos
if len(__snake_case ) == 1 and _is_chinese_char(ord(__snake_case ) ):
ref_id.append(__snake_case )
ref_ids.append(__snake_case )
assert len(__snake_case ) == len(__snake_case )
return ref_ids
def lowerCamelCase__ ( __snake_case ) -> Optional[int]:
"""simple docstring"""
with open(args.file_name, '''r''', encoding='''utf-8''' ) as f:
_UpperCamelCase = f.readlines()
_UpperCamelCase = [line.strip() for line in data if len(__snake_case ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
_UpperCamelCase = LTP(args.ltp ) # faster on a GPU device
_UpperCamelCase = BertTokenizer.from_pretrained(args.bert )
_UpperCamelCase = prepare_ref(__snake_case, __snake_case, __snake_case )
with open(args.save_path, '''w''', encoding='''utf-8''' ) as f:
_UpperCamelCase = [json.dumps(__snake_case ) + '''\n''' for ref in ref_ids]
f.writelines(__snake_case )
if __name__ == "__main__":
_a = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
required=False,
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""",
required=False,
type=str,
default="""./resources/ltp""",
help="""resources for LTP tokenizer, usually a path""",
)
parser.add_argument(
"""--bert""",
required=False,
type=str,
default="""./resources/robert""",
help="""resources for Bert tokenizer""",
)
parser.add_argument(
"""--save_path""",
required=False,
type=str,
default="""./resources/ref.txt""",
help="""path to save res""",
)
_a = parser.parse_args()
main(args)
| 19 | 1 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
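# Tester that builds a tiny RegNet config plus random pixel inputs for the unit tests below.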
class snake_case :
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=[10, 20, 30, 40] , SCREAMING_SNAKE_CASE_=[1, 1, 2, 1] , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="relu" , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = image_size
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = embeddings_size
SCREAMING_SNAKE_CASE_ = hidden_sizes
SCREAMING_SNAKE_CASE_ = depths
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = use_labels
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = num_labels
SCREAMING_SNAKE_CASE_ = scope
SCREAMING_SNAKE_CASE_ = len(SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE_ = self.get_config()
return config, pixel_values, labels
def _lowercase (self ):
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = TFRegNetModel(config=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = model(SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.num_labels
SCREAMING_SNAKE_CASE_ = TFRegNetForImageClassification(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = config_and_inputs
SCREAMING_SNAKE_CASE_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class snake_case ( __lowercase , __lowercase , unittest.TestCase ):
UpperCAmelCase__ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
UpperCAmelCase__ = (
{'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification}
if is_tf_available()
else {}
)
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = TFRegNetModelTester(self )
SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
return
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def _lowercase (self ):
"""simple docstring"""
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
@slow
def _lowercase (self ):
"""simple docstring"""
super().test_keras_fit()
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def _lowercase (self ):
"""simple docstring"""
pass
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
def check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ = model_class(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , training=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
SCREAMING_SNAKE_CASE_ = self.model_tester.num_stages
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
SCREAMING_SNAKE_CASE_ = layer_type
SCREAMING_SNAKE_CASE_ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also works when set via the config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE_ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
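# Nested helper: asserts that tuple and dict model outputs match element-wise.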
def check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_={} ):
SCREAMING_SNAKE_CASE_ = model(SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = model(SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).to_tuple()
def recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
if isinstance(SCREAMING_SNAKE_CASE_ , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) , msg=(
'''Tuple and dict output are not equal. Difference:'''
f' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}'
) , )
recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , {'''output_hidden_states''': True} )
SCREAMING_SNAKE_CASE_ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , {'''output_hidden_states''': True} )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
@slow
def _lowercase (self ):
"""simple docstring"""
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ = TFRegNetModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def _lowerCamelCase ( ):
SCREAMING_SNAKE_CASE_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class snake_case ( unittest.TestCase ):
@cached_property
def _lowercase (self ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''tf''' )
# forward pass
SCREAMING_SNAKE_CASE_ = model(**SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
# verify the logits
SCREAMING_SNAKE_CASE_ = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tf.constant([-0.41_80, -1.50_51, -3.48_36] )
tf.debugging.assert_near(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) | 714 |
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
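# Tests for DDIMParallelScheduler, built on the shared SchedulerCommonTest harness.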
class snake_case ( __lowercase ):
UpperCAmelCase__ = (DDIMParallelScheduler,)
UpperCAmelCase__ = (('''eta''', 0.0), ('''num_inference_steps''', 50))
def _lowercase (self , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = {
'''num_train_timesteps''': 10_00,
'''beta_start''': 0.00_01,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''clip_sample''': True,
}
config.update(**SCREAMING_SNAKE_CASE_ )
return config
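# Helper: runs a 10-step denoising loop with a dummy model and returns the final sample.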
def _lowercase (self , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = 10, 0.0
SCREAMING_SNAKE_CASE_ = self.dummy_model()
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE_ = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample
return sample
def _lowercase (self ):
"""simple docstring"""
for timesteps in [1_00, 5_00, 10_00]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config(steps_offset=1 )
SCREAMING_SNAKE_CASE_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1] ) )
def _lowercase (self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE_ , beta_end=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=SCREAMING_SNAKE_CASE_ , prediction_type=SCREAMING_SNAKE_CASE_ , sample_max_value=SCREAMING_SNAKE_CASE_ , )
def _lowercase (self ):
"""simple docstring"""
for t in [1, 10, 49]:
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 5_00] ):
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_20 , 4_00 ) - 0.1_47_71 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_80 , 9_60 ) - 0.3_24_60 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 , 4_86 ) - 0.0_09_79 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 , 9_98 ) - 0.02 ) ) < 1e-5
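# Exercises batch_step_no_noise on three stacked samples and checks summary statistics of the output.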
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = 10, 0.0
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.dummy_model()
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter + 0.1
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter - 0.1
SCREAMING_SNAKE_CASE_ = samplea.shape[0]
SCREAMING_SNAKE_CASE_ = torch.stack([samplea, samplea, samplea] , dim=0 )
SCREAMING_SNAKE_CASE_ = torch.arange(SCREAMING_SNAKE_CASE_ )[0:3, None].repeat(1 , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
SCREAMING_SNAKE_CASE_ = scheduler.batch_step_no_noise(SCREAMING_SNAKE_CASE_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 11_47.79_04 ) < 1e-2
assert abs(result_mean.item() - 0.49_82 ) < 1e-3
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.full_loop()
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 1_72.00_67 ) < 1e-2
assert abs(result_mean.item() - 0.22_39_67 ) < 1e-3
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.full_loop(prediction_type='''v_prediction''' )
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 52.53_02 ) < 1e-2
assert abs(result_mean.item() - 0.06_84 ) < 1e-3
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE_ , beta_start=0.01 )
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 1_49.82_95 ) < 1e-2
assert abs(result_mean.item() - 0.19_51 ) < 1e-3
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE_ , beta_start=0.01 )
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 1_49.07_84 ) < 1e-2
assert abs(result_mean.item() - 0.19_41 ) < 1e-3 | 628 | 0 |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
A = [
{'dataset': 'wikipedia', 'config_name': '20220301.de'},
{'dataset': 'wikipedia', 'config_name': '20220301.en'},
{'dataset': 'wikipedia', 'config_name': '20220301.fr'},
{'dataset': 'wikipedia', 'config_name': '20220301.frr'},
{'dataset': 'wikipedia', 'config_name': '20220301.it'},
{'dataset': 'wikipedia', 'config_name': '20220301.simple'},
{'dataset': 'snli', 'config_name': 'plain_text'},
{'dataset': 'eli5', 'config_name': 'LFQA_reddit'},
{'dataset': 'wiki40b', 'config_name': 'en'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.compressed'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.no_index'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.multiset.no_index'},
{'dataset': 'natural_questions', 'config_name': 'default'},
]
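# Expands the dataset list above into named test-case parameters, with or without config names.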
def lowerCamelCase ( UpperCamelCase : List[str]=True ) -> List[Any]:
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=SCREAMING_SNAKE_CASE_ ) )
class lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCAmelCase_ = None
lowerCAmelCase_ = None
def _snake_case ( self : Optional[Any] , snake_case__ : Tuple , snake_case__ : List[Any] ) -> List[str]:
with TemporaryDirectory() as tmp_dir:
_lowerCamelCase = dataset_module_factory(snake_case__ , cache_dir=snake_case__ )
_lowerCamelCase = import_main_class(dataset_module.module_path , dataset=snake_case__ )
_lowerCamelCase = builder_cls(
cache_dir=snake_case__ , config_name=snake_case__ , hash=dataset_module.hash , )
_lowerCamelCase = '/'.join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=snake_case__ ).replace(os.sep , '/' ),
config.DATASET_INFO_FILENAME,
] )
_lowerCamelCase = cached_path(snake_case__ , cache_dir=snake_case__ )
self.assertTrue(os.path.exists(snake_case__ ) )
@pytest.mark.integration
def lowerCamelCase ( UpperCamelCase : Any ) -> Union[str, Any]:
_lowerCamelCase = tmp_path_factory.mktemp('test_hf_gcp' ) / 'test_wikipedia_simple'
_lowerCamelCase = dataset_module_factory('wikipedia' , cache_dir=UpperCamelCase )
_lowerCamelCase = import_main_class(dataset_module.module_path )
_lowerCamelCase = builder_cls(
cache_dir=UpperCamelCase , config_name='20220301.frr' , hash=dataset_module.hash , )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
_lowerCamelCase = None
builder_instance.download_and_prepare()
_lowerCamelCase = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def lowerCamelCase ( UpperCamelCase : List[str] ) -> Any:
_lowerCamelCase = dataset_module_factory('wikipedia' , cache_dir=UpperCamelCase )
_lowerCamelCase = import_main_class(dataset_module.module_path , dataset=UpperCamelCase )
_lowerCamelCase = builder_cls(
cache_dir=UpperCamelCase , config_name='20220301.frr' , hash=dataset_module.hash , )
_lowerCamelCase = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(UpperCamelCase , UpperCamelCase )
assert "train" in ds
assert isinstance(ds['train'] , UpperCamelCase )
assert next(iter(ds['train'] ) ) | 544 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
A = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
@dataclass
class lowerCAmelCase__ :
'''simple docstring'''
lowerCAmelCase_ = field(
default='cifar10' ,metadata={'help': 'Name of a dataset from the datasets package'} )
lowerCAmelCase_ = field(
default=SCREAMING_SNAKE_CASE_ ,metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
lowerCAmelCase_ = field(
default=SCREAMING_SNAKE_CASE_ ,metadata={'help': 'The column name of the images in the files.'} )
lowerCAmelCase_ = field(default=SCREAMING_SNAKE_CASE_ ,metadata={'help': 'A folder containing the training data.'} )
lowerCAmelCase_ = field(default=SCREAMING_SNAKE_CASE_ ,metadata={'help': 'A folder containing the validation data.'} )
lowerCAmelCase_ = field(
default=0.1_5 ,metadata={'help': 'Percent to split off of train for validation.'} )
lowerCAmelCase_ = field(
default=SCREAMING_SNAKE_CASE_ ,metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} ,)
lowerCAmelCase_ = field(
default=SCREAMING_SNAKE_CASE_ ,metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} ,)
def _snake_case ( self : Union[str, Any] ) -> Dict:
_lowerCamelCase = {}
if self.train_dir is not None:
_lowerCamelCase = self.train_dir
if self.validation_dir is not None:
_lowerCamelCase = self.validation_dir
_lowerCamelCase = data_files if data_files else None
@dataclass
class lowerCAmelCase__ :
'''simple docstring'''
lowerCAmelCase_ = field(
default=SCREAMING_SNAKE_CASE_ ,metadata={
'help': (
'The model checkpoint for weights initialization. Don\'t set if you want to train a model from scratch.'
)
} ,)
lowerCAmelCase_ = field(
default=SCREAMING_SNAKE_CASE_ ,metadata={'help': 'Pretrained config name or path if not the same as model_name_or_path'} )
lowerCAmelCase_ = field(
default=SCREAMING_SNAKE_CASE_ ,metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} ,)
lowerCAmelCase_ = field(
default=SCREAMING_SNAKE_CASE_ ,metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
lowerCAmelCase_ = field(
default='main' ,metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} ,)
lowerCAmelCase_ = field(default=SCREAMING_SNAKE_CASE_ ,metadata={'help': 'Name or path of preprocessor config.'} )
lowerCAmelCase_ = field(
default=SCREAMING_SNAKE_CASE_ ,metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} ,)
lowerCAmelCase_ = field(
default=0.7_5 ,metadata={'help': 'The ratio of the number of masked tokens in the input sequence.'} )
lowerCAmelCase_ = field(
default=SCREAMING_SNAKE_CASE_ ,metadata={'help': 'Whether or not to train with normalized pixel values as target.'} )
@dataclass
class lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCAmelCase_ = field(
default=1e-3 ,metadata={'help': 'Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'} )
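# Collate function: stacks each example's pixel tensor into a single batch tensor for the Trainer.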
def lowerCamelCase ( UpperCamelCase : Union[str, Any] ) -> int:
_lowerCamelCase = torch.stack([example['pixel_values'] for example in examples] )
return {"pixel_values": pixel_values}
def lowerCamelCase ( ) -> Dict:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_lowerCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_mae' , UpperCamelCase , UpperCamelCase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_lowerCamelCase = training_args.get_process_log_level()
logger.setLevel(UpperCamelCase )
transformers.utils.logging.set_verbosity(UpperCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
_lowerCamelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_lowerCamelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset.
_lowerCamelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
_lowerCamelCase = None if 'validation' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , UpperCamelCase ) and data_args.train_val_split > 0.0:
_lowerCamelCase = ds['train'].train_test_split(data_args.train_val_split )
_lowerCamelCase = split['train']
_lowerCamelCase = split['test']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowerCamelCase = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
_lowerCamelCase = ViTMAEConfig.from_pretrained(model_args.config_name , **UpperCamelCase )
elif model_args.model_name_or_path:
_lowerCamelCase = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **UpperCamelCase )
else:
_lowerCamelCase = ViTMAEConfig()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(F"""New config: {config}""" )
# adapt config
config.update(
{
'mask_ratio': model_args.mask_ratio,
'norm_pix_loss': model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
_lowerCamelCase = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **UpperCamelCase )
elif model_args.model_name_or_path:
_lowerCamelCase = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **UpperCamelCase )
else:
_lowerCamelCase = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
_lowerCamelCase = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch' )
_lowerCamelCase = ViTMAEForPreTraining(UpperCamelCase )
if training_args.do_train:
_lowerCamelCase = ds['train'].column_names
else:
_lowerCamelCase = ds['validation'].column_names
if data_args.image_column_name is not None:
_lowerCamelCase = data_args.image_column_name
elif "image" in column_names:
_lowerCamelCase = 'image'
elif "img" in column_names:
_lowerCamelCase = 'img'
else:
_lowerCamelCase = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
_lowerCamelCase = image_processor.size['shortest_edge']
else:
_lowerCamelCase = (image_processor.size['height'], image_processor.size['width'])
_lowerCamelCase = Compose(
[
Lambda(lambda UpperCamelCase : img.convert('RGB' ) if img.mode != "RGB" else img ),
RandomResizedCrop(UpperCamelCase , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
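# Applies the MAE training transforms to every image in a batch of examples.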
def preprocess_images(UpperCamelCase : Tuple ):
_lowerCamelCase = [transforms(UpperCamelCase ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
_lowerCamelCase = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(UpperCamelCase )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
_lowerCamelCase = (
ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(UpperCamelCase )
# Compute absolute learning rate
_lowerCamelCase = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
_lowerCamelCase = training_args.base_learning_rate * total_train_batch_size / 2_56
# Initialize our trainer
_lowerCamelCase = Trainer(
model=UpperCamelCase , args=UpperCamelCase , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=UpperCamelCase , data_collator=UpperCamelCase , )
# Training
if training_args.do_train:
_lowerCamelCase = None
if training_args.resume_from_checkpoint is not None:
_lowerCamelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_lowerCamelCase = last_checkpoint
_lowerCamelCase = trainer.train(resume_from_checkpoint=UpperCamelCase )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
_lowerCamelCase = trainer.evaluate()
trainer.log_metrics('eval' , UpperCamelCase )
trainer.save_metrics('eval' , UpperCamelCase )
# Write model card and (optionally) push to hub
_lowerCamelCase = {
'tasks': 'masked-auto-encoding',
'dataset': data_args.dataset_name,
'tags': ['masked-auto-encoding'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**UpperCamelCase )
else:
trainer.create_model_card(**UpperCamelCase )
def lowerCamelCase ( UpperCamelCase : Any ) -> Dict:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main() | 544 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
class __snake_case (_a ):
lowerCAmelCase__ = ["pixel_values"]
def __init__( self : int , _UpperCAmelCase : bool = True , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[int, float] = 1 / 255 , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : bool = True , **_UpperCAmelCase : Optional[int] , ) -> None:
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
_lowerCAmelCase : List[Any] = size if size is not None else {"""height""": 384, """width""": 384}
_lowerCAmelCase : Optional[int] = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
_lowerCAmelCase : int = do_resize
_lowerCAmelCase : Dict = size
_lowerCAmelCase : Optional[Any] = resample
_lowerCAmelCase : Dict = do_rescale
_lowerCAmelCase : int = rescale_factor
_lowerCAmelCase : List[str] = do_normalize
_lowerCAmelCase : str = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
_lowerCAmelCase : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD
_lowerCAmelCase : Tuple = do_convert_rgb
def SCREAMING_SNAKE_CASE ( self : Any , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Dict[str, int] , _UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : Dict , ) -> np.ndarray:
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}" )
_lowerCAmelCase : List[str] = (size["""height"""], size["""width"""])
return resize(_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[int, float] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : str , ) -> np.ndarray:
'''simple docstring'''
return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : Union[str, Any] , ) -> np.ndarray:
'''simple docstring'''
return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCAmelCase : ImageInput , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Dict[str, int]] = None , _UpperCAmelCase : PILImageResampling = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[float] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **_UpperCAmelCase : str , ) -> BatchFeature:
'''simple docstring'''
_lowerCAmelCase : Dict = do_resize if do_resize is not None else self.do_resize
_lowerCAmelCase : Optional[Any] = resample if resample is not None else self.resample
_lowerCAmelCase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
_lowerCAmelCase : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCAmelCase : int = do_normalize if do_normalize is not None else self.do_normalize
_lowerCAmelCase : Tuple = image_mean if image_mean is not None else self.image_mean
_lowerCAmelCase : Dict = image_std if image_std is not None else self.image_std
_lowerCAmelCase : Any = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_lowerCAmelCase : List[Any] = size if size is not None else self.size
_lowerCAmelCase : List[str] = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
_lowerCAmelCase : int = make_list_of_images(_UpperCAmelCase )
if not valid_images(_UpperCAmelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_lowerCAmelCase : str = [convert_to_rgb(_UpperCAmelCase ) for image in images]
# All transformations expect numpy arrays.
_lowerCAmelCase : Any = [to_numpy_array(_UpperCAmelCase ) for image in images]
if do_resize:
_lowerCAmelCase : Dict = [self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase ) for image in images]
if do_rescale:
_lowerCAmelCase : Tuple = [self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase ) for image in images]
if do_normalize:
_lowerCAmelCase : Any = [self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase ) for image in images]
_lowerCAmelCase : str = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images]
_lowerCAmelCase : str = BatchFeature(data={"""pixel_values""": images} , tensor_type=_UpperCAmelCase )
return encoded_outputs
| 196 |
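The processor above applies resize, then rescale, then normalize. A minimal numpy sketch of the rescale/normalize steps on a channels-first image; the mean/std values are illustrative stand-ins for the CLIP statistics:

import numpy as np

image = np.random.randint(0, 256, size=(3, 384, 384)).astype(np.float32)  # channels-first
rescaled = image * (1 / 255)                               # the do_rescale step
mean = np.array([0.48, 0.46, 0.41]).reshape(3, 1, 1)       # illustrative, ~CLIP mean
std = np.array([0.27, 0.26, 0.28]).reshape(3, 1, 1)        # illustrative, ~CLIP std
normalized = (rescaled - mean) / std                       # the do_normalize step
assert normalized.shape == (3, 384, 384)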
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __snake_case :
def __init__( self : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : int=13 , _UpperCAmelCase : Union[str, Any]=30 , _UpperCAmelCase : List[str]=2 , _UpperCAmelCase : Tuple=3 , _UpperCAmelCase : Any=True , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : int=32 , _UpperCAmelCase : List[Any]=5 , _UpperCAmelCase : int=4 , _UpperCAmelCase : Union[str, Any]=37 , _UpperCAmelCase : Optional[Any]="gelu" , _UpperCAmelCase : Dict=0.1 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : List[Any]=10 , _UpperCAmelCase : List[str]=0.02 , _UpperCAmelCase : Optional[int]=3 , _UpperCAmelCase : Any=0.6 , _UpperCAmelCase : Any=None , ) -> Tuple:
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = parent
_lowerCAmelCase : Any = batch_size
_lowerCAmelCase : List[str] = image_size
_lowerCAmelCase : Tuple = patch_size
_lowerCAmelCase : int = num_channels
_lowerCAmelCase : Optional[int] = is_training
_lowerCAmelCase : int = use_labels
_lowerCAmelCase : List[str] = hidden_size
_lowerCAmelCase : Optional[Any] = num_hidden_layers
_lowerCAmelCase : Optional[int] = num_attention_heads
_lowerCAmelCase : Any = intermediate_size
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : List[Any] = hidden_dropout_prob
_lowerCAmelCase : List[str] = attention_probs_dropout_prob
_lowerCAmelCase : str = type_sequence_label_size
_lowerCAmelCase : Optional[Any] = initializer_range
_lowerCAmelCase : Tuple = mask_ratio
_lowerCAmelCase : Tuple = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
_lowerCAmelCase : Any = (image_size // patch_size) ** 2
_lowerCAmelCase : Any = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
'''simple docstring'''
_lowerCAmelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : int = None
if self.use_labels:
_lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : Optional[int] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
'''simple docstring'''
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def SCREAMING_SNAKE_CASE ( self : int , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Dict ) -> Optional[int]:
'''simple docstring'''
_lowerCAmelCase : int = ViTMAEModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase : List[Any] = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any ) -> Any:
'''simple docstring'''
_lowerCAmelCase : Optional[int] = ViTMAEForPreTraining(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase : int = model(_UpperCAmelCase )
_lowerCAmelCase : Union[str, Any] = (self.image_size // self.patch_size) ** 2
_lowerCAmelCase : Union[str, Any] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
_lowerCAmelCase : Dict = 1
_lowerCAmelCase : Dict = ViTMAEForPreTraining(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase : str = model(_UpperCAmelCase )
_lowerCAmelCase : Optional[int] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def SCREAMING_SNAKE_CASE ( self : int ) -> Any:
'''simple docstring'''
_lowerCAmelCase : int = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : int = config_and_inputs
_lowerCAmelCase : List[str] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __snake_case (_a , _a , unittest.TestCase ):
lowerCAmelCase__ = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
lowerCAmelCase__ = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
'''simple docstring'''
_lowerCAmelCase : List[str] = ViTMAEModelTester(self )
_lowerCAmelCase : int = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def SCREAMING_SNAKE_CASE ( self : str ) -> int:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : int = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCAmelCase : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) )
def SCREAMING_SNAKE_CASE ( self : int ) -> Any:
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : List[Any] = model_class(_UpperCAmelCase )
_lowerCAmelCase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Optional[Any] = [*signature.parameters.keys()]
_lowerCAmelCase : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
'''simple docstring'''
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] ) -> List[Any]:
'''simple docstring'''
np.random.seed(2 )
_lowerCAmelCase : Optional[int] = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
_lowerCAmelCase : Any = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_lowerCAmelCase : Tuple = torch.from_numpy(_UpperCAmelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
_lowerCAmelCase : Any = pt_noise
super().check_pt_tf_models(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[int] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
_lowerCAmelCase : List[str] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
_lowerCAmelCase : List[Any] = outputs[0].cpu().numpy()
_lowerCAmelCase : Union[str, Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_UpperCAmelCase )
_lowerCAmelCase : str = model_class.from_pretrained(_UpperCAmelCase )
model.to(_UpperCAmelCase )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
_lowerCAmelCase : List[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
# Make sure we don't have nans
_lowerCAmelCase : int = after_outputs[0].cpu().numpy()
_lowerCAmelCase : Optional[Any] = 0
_lowerCAmelCase : List[Any] = np.amax(np.abs(out_1 - out_2 ) )  # diff between pre-save and post-reload outputs
self.assertLessEqual(_UpperCAmelCase , 1E-5 )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def SCREAMING_SNAKE_CASE ( self : Any ) -> str:
'''simple docstring'''
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
pass
@slow
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : str = ViTMAEModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def _UpperCAmelCase ():
'''simple docstring'''
_lowerCAmelCase : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __snake_case (unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
'''simple docstring'''
np.random.seed(2 )
_lowerCAmelCase : str = ViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ).to(_UpperCAmelCase )
_lowerCAmelCase : Union[str, Any] = self.default_image_processor
_lowerCAmelCase : Optional[int] = prepare_img()
_lowerCAmelCase : Any = image_processor(images=_UpperCAmelCase , return_tensors="""pt""" ).to(_UpperCAmelCase )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
_lowerCAmelCase : Optional[int] = ViTMAEConfig()
_lowerCAmelCase : Any = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
_lowerCAmelCase : List[Any] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
_lowerCAmelCase : Dict = model(**_UpperCAmelCase , noise=torch.from_numpy(_UpperCAmelCase ).to(device=_UpperCAmelCase ) )
# verify the logits
_lowerCAmelCase : Optional[Any] = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
_lowerCAmelCase : str = torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(_UpperCAmelCase ) , atol=1E-4 ) )
| 196 | 1 |
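A worked example of the sequence-length formula used by the tester above, with the typical ViT-MAE settings (224px images, 16px patches, 75% mask ratio) assumed:

import math

image_size, patch_size, mask_ratio = 224, 16, 0.75
num_patches = (image_size // patch_size) ** 2                      # 14 * 14 = 196
seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))  # +1 for the [CLS] token
assert num_patches == 196 and seq_length == 50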
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def UpperCamelCase__ ( _lowercase : Tuple , _lowercase : Optional[Any] , _lowercase : Any , _lowercase : Tuple , _lowercase : str ) -> Dict:
# Load configuration defined in the metadata file
with open(lowerCAmelCase__ ) as metadata_file:
__UpperCAmelCase: Optional[Any] = json.load(lowerCAmelCase__ )
__UpperCAmelCase: List[Any] = LukeConfig(use_entity_aware_attention=lowerCAmelCase__ , **metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
__UpperCAmelCase: Optional[Any] = torch.load(lowerCAmelCase__ , map_location="""cpu""" )['module']
# Load the entity vocab file
__UpperCAmelCase: Optional[Any] = load_original_entity_vocab(lowerCAmelCase__ )
# add an entry for [MASK2]
__UpperCAmelCase: List[Any] = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
__UpperCAmelCase: Optional[int] = XLMRobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
__UpperCAmelCase: Union[str, Any] = AddedToken("""<ent>""" , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ )
__UpperCAmelCase: Tuple = AddedToken("""<ent2>""" , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ )
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(lowerCAmelCase__ )
with open(os.path.join(lowerCAmelCase__ , """tokenizer_config.json""" ) , """r""" ) as f:
__UpperCAmelCase: Dict = json.load(lowerCAmelCase__ )
__UpperCAmelCase: str = 'MLukeTokenizer'
with open(os.path.join(lowerCAmelCase__ , """tokenizer_config.json""" ) , """w""" ) as f:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
with open(os.path.join(lowerCAmelCase__ , MLukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) , """w""" ) as f:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
__UpperCAmelCase: Tuple = MLukeTokenizer.from_pretrained(lowerCAmelCase__ )
# Initialize the embeddings of the special tokens
__UpperCAmelCase: int = tokenizer.convert_tokens_to_ids(["""@"""] )[0]
__UpperCAmelCase: Any = tokenizer.convert_tokens_to_ids(["""#"""] )[0]
__UpperCAmelCase: int = state_dict['embeddings.word_embeddings.weight']
__UpperCAmelCase: str = word_emb[ent_init_index].unsqueeze(0 )
__UpperCAmelCase: Any = word_emb[enta_init_index].unsqueeze(0 )
__UpperCAmelCase: Any = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
__UpperCAmelCase: Any = state_dict[bias_name]
__UpperCAmelCase: Optional[Any] = decoder_bias[ent_init_index].unsqueeze(0 )
__UpperCAmelCase: List[Any] = decoder_bias[enta_init_index].unsqueeze(0 )
__UpperCAmelCase: str = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
__UpperCAmelCase: Union[str, Any] = F'''encoder.layer.{layer_index}.attention.self.'''
__UpperCAmelCase: Any = state_dict[prefix + matrix_name]
__UpperCAmelCase: int = state_dict[prefix + matrix_name]
__UpperCAmelCase: Union[str, Any] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
__UpperCAmelCase: Optional[Any] = state_dict['entity_embeddings.entity_embeddings.weight']
__UpperCAmelCase: int = entity_emb[entity_vocab['[MASK]']].unsqueeze(0 )
__UpperCAmelCase: str = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
__UpperCAmelCase: Any = state_dict['entity_predictions.bias']
__UpperCAmelCase: Any = entity_prediction_bias[entity_vocab['[MASK]']].unsqueeze(0 )
__UpperCAmelCase: int = torch.cat([entity_prediction_bias, entity_mask_bias] )
__UpperCAmelCase: Dict = LukeForMaskedLM(config=lowerCAmelCase__ ).eval()
state_dict.pop("""entity_predictions.decoder.weight""" )
state_dict.pop("""lm_head.decoder.weight""" )
state_dict.pop("""lm_head.decoder.bias""" )
__UpperCAmelCase: List[str] = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("""lm_head""" ) or key.startswith("""entity_predictions""" )):
__UpperCAmelCase: int = state_dict[key]
else:
__UpperCAmelCase: Optional[Any] = state_dict[key]
__UpperCAmelCase: str = model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
if set(lowerCAmelCase__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' )
if set(lowerCAmelCase__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
__UpperCAmelCase: Dict = MLukeTokenizer.from_pretrained(lowerCAmelCase__ , task="""entity_classification""" )
__UpperCAmelCase: Tuple = 'ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'
__UpperCAmelCase: int = (0, 9)
__UpperCAmelCase: Optional[Any] = tokenizer(lowerCAmelCase__ , entity_spans=[span] , return_tensors="""pt""" )
__UpperCAmelCase: Dict = model(**lowerCAmelCase__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
__UpperCAmelCase: Dict = torch.Size((1, 3_3, 7_6_8) )
__UpperCAmelCase: int = torch.tensor([[0.08_92, 0.05_96, -0.28_19], [0.01_34, 0.11_99, 0.05_73], [-0.01_69, 0.09_27, 0.06_44]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase__ , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
__UpperCAmelCase: List[str] = torch.Size((1, 1, 7_6_8) )
__UpperCAmelCase: str = torch.tensor([[-0.14_82, 0.06_09, 0.03_22]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
F''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowerCAmelCase__ , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
__UpperCAmelCase: List[Any] = MLukeTokenizer.from_pretrained(lowerCAmelCase__ )
__UpperCAmelCase: List[Any] = 'Tokyo is the capital of <mask>.'
__UpperCAmelCase: Optional[Any] = (2_4, 3_0)
__UpperCAmelCase: List[str] = tokenizer(lowerCAmelCase__ , entity_spans=[span] , return_tensors="""pt""" )
__UpperCAmelCase: Optional[int] = model(**lowerCAmelCase__ )
__UpperCAmelCase: Union[str, Any] = encoding['input_ids'][0].tolist()
__UpperCAmelCase: Optional[int] = input_ids.index(tokenizer.convert_tokens_to_ids("""<mask>""" ) )
__UpperCAmelCase: Union[str, Any] = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowerCAmelCase__ )
__UpperCAmelCase: str = outputs.entity_logits[0][0].argmax().item()
__UpperCAmelCase: List[Any] = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("""en:""" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(lowerCAmelCase__ ) )
model.save_pretrained(lowerCAmelCase__ )
def UpperCamelCase__ ( _lowercase : List[str] ) -> Optional[Any]:
__UpperCAmelCase: Dict = ['[MASK]', '[PAD]', '[UNK]']
__UpperCAmelCase: Dict = [json.loads(lowerCAmelCase__ ) for line in open(lowerCAmelCase__ )]
__UpperCAmelCase: Optional[int] = {}
for entry in data:
__UpperCAmelCase: int = entry['id']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
__UpperCAmelCase: Tuple = entity_id
break
__UpperCAmelCase: Dict = F'''{language}:{entity_name}'''
__UpperCAmelCase: Union[str, Any] = entity_id
return new_mapping
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 523 |
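A small sketch of the embedding-extension trick the conversion above uses for [MASK2]: append one row, copied from an existing special token, to the embedding matrix. The sizes and index below are made up:

import torch

entity_emb = torch.randn(10, 8)                 # (entity_vocab_size, hidden_size), made up
mask_row = entity_emb[3].unsqueeze(0)           # pretend index 3 is [MASK]
entity_emb = torch.cat([entity_emb, mask_row])  # new [MASK2] row appended at index 10
assert entity_emb.shape == (11, 8)
assert torch.equal(entity_emb[10], entity_emb[3])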
_lowercase : Any =[0, 2, 4, 6, 8]
_lowercase : List[Any] =[1, 3, 5, 7, 9]
def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
if remaining_length == 0:
if digits[0] == 0 or digits[-1] == 0:
return 0
for i in range(length // 2 - 1 ,-1 ,-1 ):
remainder += digits[i] + digits[length - i - 1]
if remainder % 2 == 0:
return 0
remainder //= 10
return 1
if remaining_length == 1:
if remainder % 2 == 0:
return 0
lowerCamelCase_ : Dict = 0
for digit in range(10 ):
lowerCamelCase_ : List[Any] = digit
result += reversible_numbers(
0 ,(remainder + 2 * digit) // 10 ,lowerCAmelCase__ ,lowerCAmelCase__ )
return result
lowerCamelCase_ : List[str] = 0
for digita in range(10 ):
lowerCamelCase_ : Tuple = digita
if (remainder + digita) % 2 == 0:
lowerCamelCase_ : Dict = ODD_DIGITS
else:
lowerCamelCase_ : str = EVEN_DIGITS
for digita in other_parity_digits:
lowerCamelCase_ : Optional[int] = digita
result += reversible_numbers(
remaining_length - 2 ,(remainder + digita + digita) // 10 ,lowerCAmelCase__ ,lowerCAmelCase__ ,)
return result
def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ = 9 ):
lowerCamelCase_ : int = 0
for length in range(1 ,max_power + 1 ):
result += reversible_numbers(lowerCAmelCase__ ,0 ,[0] * length ,lowerCAmelCase__ )
return result
if __name__ == "__main__":
print(F'''{solution() = }''')
| 364 | 0 |
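A brute-force cross-check of the digit-parity recursion above; per the Project Euler 145 statement there are 120 reversible numbers below one thousand:

def is_reversible(n: int) -> bool:
    # n is reversible when n + reverse(n) consists only of odd digits;
    # n must not end in 0, or reverse(n) would have a leading zero.
    if n % 10 == 0:
        return False
    total = n + int(str(n)[::-1])
    return all(int(d) % 2 == 1 for d in str(total))

assert sum(is_reversible(n) for n in range(1, 1000)) == 120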
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def a__ ( A_, A_, A_=None ):
'''simple docstring'''
assert torch_layer.weight.shape == weight.shape, f'''{torch_layer} layer.weight does not match'''
__magic_name__ = nn.Parameter(A_ )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, f'''{torch_layer} layer.bias does not match'''
__magic_name__ = nn.Parameter(A_ )
def a__ ( A_, A_, A_ ):
'''simple docstring'''
__magic_name__ = np.asarray(weights[0] )
__magic_name__ = np.asarray(weights[1] )
__magic_name__ = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key, torch.tensor(A_ ).transpose(1, 2 ).contiguous().view(-1, A_ ), )
set_param(
torch_layer.self_attention.value, torch.tensor(A_ ).transpose(1, 2 ).contiguous().view(-1, A_ ), )
set_param(
torch_layer.output.dense, torch.tensor(A_ ).view(-1, A_ ).contiguous().transpose(0, 1 ), )
def a__ ( A_, A_, A_ ):
'''simple docstring'''
__magic_name__ = np.asarray(weights[0] )
__magic_name__ = np.asarray(weights[1] )
__magic_name__ = np.asarray(weights[2] )
__magic_name__ = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query, torch.tensor(A_ ).transpose(1, 2 ).contiguous().view(-1, A_ ), )
set_param(
torch_layer.self_attention.key, torch.tensor(A_ ).transpose(1, 2 ).contiguous().view(-1, A_ ), )
set_param(
torch_layer.self_attention.value, torch.tensor(A_ ).transpose(1, 2 ).contiguous().view(-1, A_ ), )
set_param(
torch_layer.output.dense, torch.tensor(A_ ).view(-1, A_ ).contiguous().transpose(0, 1 ), )
def a__ ( A_, A_, A_ ):
'''simple docstring'''
__magic_name__ = weights[0][0][0]
__magic_name__ = np.asarray(layer_norm_a[0] )
__magic_name__ = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm, torch.tensor(A_ ), torch.tensor(A_ ), )
# lsh weights + output
__magic_name__ = weights[0][1]
if len(A_ ) < 4:
set_layer_weights_in_torch_lsh(A_, torch_block.attention, A_ )
else:
set_layer_weights_in_torch_local(A_, torch_block.attention, A_ )
# intermediate weighs
__magic_name__ = weights[2][0][1][2]
# Chunked Feed Forward
if len(A_ ) == 4:
__magic_name__ = intermediate_weights[2]
# layernorm 2
__magic_name__ = np.asarray(intermediate_weights[0][0] )
__magic_name__ = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm, torch.tensor(A_ ), torch.tensor(A_ ), )
# intermediate dense
__magic_name__ = np.asarray(intermediate_weights[1][0] )
__magic_name__ = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense, torch.tensor(A_ ).transpose(0, 1 ).contiguous(), torch.tensor(A_ ), )
# intermediate out
__magic_name__ = np.asarray(intermediate_weights[4][0] )
__magic_name__ = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense, torch.tensor(A_ ).transpose(0, 1 ).contiguous(), torch.tensor(A_ ), )
def a__ ( A_, A_, A_ ):
'''simple docstring'''
__magic_name__ = torch_model.reformer
# word embeds
__magic_name__ = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings, torch.tensor(A_ ), )
if isinstance(weights[3], A_ ):
__magic_name__ = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
__magic_name__ = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f'''{position_embeddings[emb_idx]} emb does not match'''
__magic_name__ = nn.Parameter(torch.tensor(A_ ) )
__magic_name__ = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
A_ ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
__magic_name__ = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(A_, A_, A_ )
# output layer norm
__magic_name__ = np.asarray(weights[7][0] )
__magic_name__ = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm, torch.tensor(A_ ), torch.tensor(A_ ), )
# output embeddings
__magic_name__ = np.asarray(weights[9][0] )
__magic_name__ = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder, torch.tensor(A_ ).transpose(0, 1 ).contiguous(), torch.tensor(A_ ), )
def a__ ( A_, A_, A_ ):
'''simple docstring'''
__magic_name__ = ReformerConfig.from_json_file(A_ )
print(f'''Building PyTorch model from configuration: {config}''' )
__magic_name__ = ReformerModelWithLMHead(A_ )
with open(A_, """rb""" ) as f:
__magic_name__ = pickle.load(A_ )["""weights"""]
set_model_weights_in_torch(A_, A_, config.hidden_size )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict(), A_ )
if __name__ == "__main__":
__lowerCAmelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained Reformer model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__lowerCAmelCase : int = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 76 |
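A sketch of the layout conversion applied throughout the script above: a kernel stored as (in_features, out_features) becomes a torch Linear weight of shape (out_features, in_features) via transpose. The shapes are illustrative:

import numpy as np
import torch

trax_kernel = np.random.rand(512, 128).astype(np.float32)            # (in_features, out_features)
torch_weight = torch.tensor(trax_kernel).transpose(0, 1).contiguous()
assert torch_weight.shape == (128, 512)                              # (out_features, in_features)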
import math
def a__ ( A_, A_ = 0, A_ = 0 ):
'''simple docstring'''
__magic_name__ = end or len(A_ )
for i in range(A_, A_ ):
__magic_name__ = i
__magic_name__ = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
__magic_name__ = array[temp_index - 1]
temp_index -= 1
__magic_name__ = temp_index_value
return array
def a__ ( A_, A_, A_ ): # Max Heap
'''simple docstring'''
__magic_name__ = index
__magic_name__ = 2 * index + 1 # Left Node
__magic_name__ = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
__magic_name__ = left_index
if right_index < heap_size and array[largest] < array[right_index]:
__magic_name__ = right_index
if largest != index:
__magic_name__ , __magic_name__ = array[largest], array[index]
heapify(A_, A_, A_ )
def a__ ( A_ ):
'''simple docstring'''
__magic_name__ = len(A_ )
for i in range(n // 2, -1, -1 ):
heapify(A_, A_, A_ )
for i in range(n - 1, 0, -1 ):
__magic_name__ , __magic_name__ = array[0], array[i]
heapify(A_, 0, A_ )
return array
def a__ ( A_, A_, A_, A_ ):
'''simple docstring'''
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def a__ ( A_, A_, A_, A_ ):
'''simple docstring'''
__magic_name__ = low
__magic_name__ = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
__magic_name__ , __magic_name__ = array[j], array[i]
i += 1
def a__ ( A_ ):
'''simple docstring'''
if len(A_ ) == 0:
return array
__magic_name__ = 2 * math.ceil(math.loga(len(A_ ) ) )
__magic_name__ = 16
return intro_sort(A_, 0, len(A_ ), A_, A_ )
def a__ ( A_, A_, A_, A_, A_ ):
'''simple docstring'''
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(A_ )
max_depth -= 1
__magic_name__ = median_of_a(A_, A_, start + ((end - start) // 2) + 1, end - 1 )
__magic_name__ = partition(A_, A_, A_, A_ )
intro_sort(A_, A_, A_, A_, A_ )
__magic_name__ = p
return insertion_sort(A_, A_, A_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase : str = input('Enter numbers separated by a comma : ').strip()
__lowerCAmelCase : List[Any] = [float(item) for item in user_input.split(',')]
print(sort(unsorted))
| 76 | 1 |
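A worked example of the recursion-depth budget used by the introsort above: after 2 * ceil(log2(n)) partition levels it falls back to heap sort:

import math

n = 1000
max_depth = 2 * math.ceil(math.log2(n))  # log2(1000) ~= 9.97 -> ceil 10 -> budget 20
assert max_depth == 20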
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class snake_case__(unittest.TestCase ):
"""simple docstring"""
lowercase_ = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowercase_ = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ):
lowercase__ : int = TextaTextGenerationPipeline(model=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE )
return generator, ["Something to write", "Something else"]
def snake_case ( self : Any , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict ):
lowercase__ : int = generator("Something there" )
self.assertEqual(SCREAMING_SNAKE_CASE , [{"generated_text": ANY(SCREAMING_SNAKE_CASE )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]["generated_text"].startswith("Something there" ) )
lowercase__ : Tuple = generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=SCREAMING_SNAKE_CASE )
self.assertEqual(
SCREAMING_SNAKE_CASE , [
[{"generated_text": ANY(SCREAMING_SNAKE_CASE )}, {"generated_text": ANY(SCREAMING_SNAKE_CASE )}],
[{"generated_text": ANY(SCREAMING_SNAKE_CASE )}, {"generated_text": ANY(SCREAMING_SNAKE_CASE )}],
] , )
lowercase__ : Optional[Any] = generator(
["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=SCREAMING_SNAKE_CASE )
self.assertEqual(
SCREAMING_SNAKE_CASE , [
[{"generated_text": ANY(SCREAMING_SNAKE_CASE )}, {"generated_text": ANY(SCREAMING_SNAKE_CASE )}],
[{"generated_text": ANY(SCREAMING_SNAKE_CASE )}, {"generated_text": ANY(SCREAMING_SNAKE_CASE )}],
] , )
with self.assertRaises(SCREAMING_SNAKE_CASE ):
generator(4 )
@require_torch
def snake_case ( self : List[str] ):
lowercase__ : List[str] = pipeline("text2text-generation" , model="patrickvonplaten/t5-tiny-random" , framework="pt" )
# do_sample=False necessary for reproducibility
lowercase__ : str = generator("Something there" , do_sample=SCREAMING_SNAKE_CASE )
self.assertEqual(SCREAMING_SNAKE_CASE , [{"generated_text": ""}] )
lowercase__ : Dict = 3
lowercase__ : Any = generator(
"Something there" , num_return_sequences=SCREAMING_SNAKE_CASE , num_beams=SCREAMING_SNAKE_CASE , )
lowercase__ : Optional[int] = [
{"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
{"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
{"generated_text": ""},
]
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = generator("This is a test" , do_sample=SCREAMING_SNAKE_CASE , num_return_sequences=2 , return_tensors=SCREAMING_SNAKE_CASE )
self.assertEqual(
SCREAMING_SNAKE_CASE , [
{"generated_token_ids": ANY(torch.Tensor )},
{"generated_token_ids": ANY(torch.Tensor )},
] , )
lowercase__ : Union[str, Any] = generator.model.config.eos_token_id
lowercase__ : Tuple = "<pad>"
lowercase__ : Any = generator(
["This is a test", "This is a second test"] , do_sample=SCREAMING_SNAKE_CASE , num_return_sequences=2 , batch_size=2 , return_tensors=SCREAMING_SNAKE_CASE , )
self.assertEqual(
SCREAMING_SNAKE_CASE , [
[
{"generated_token_ids": ANY(torch.Tensor )},
{"generated_token_ids": ANY(torch.Tensor )},
],
[
{"generated_token_ids": ANY(torch.Tensor )},
{"generated_token_ids": ANY(torch.Tensor )},
],
] , )
@require_tf
def snake_case ( self : List[Any] ):
lowercase__ : Union[str, Any] = pipeline("text2text-generation" , model="patrickvonplaten/t5-tiny-random" , framework="tf" )
# do_sample=False necessary for reproducibility
lowercase__ : List[Any] = generator("Something there" , do_sample=SCREAMING_SNAKE_CASE )
self.assertEqual(SCREAMING_SNAKE_CASE , [{"generated_text": ""}] )
| 496 |
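A minimal sketch of the ANY helper the assertions above rely on (an assumed implementation, not imported from the test utilities): an object that compares equal to any value of the given types:

class ANY:
    def __init__(self, *types):
        self.types = types

    def __eq__(self, other):
        return isinstance(other, self.types)

assert {"generated_text": "hello"} == {"generated_text": ANY(str)}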
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''b0''': efficientnet.EfficientNetB0,
'''b1''': efficientnet.EfficientNetB1,
'''b2''': efficientnet.EfficientNetB2,
'''b3''': efficientnet.EfficientNetB3,
'''b4''': efficientnet.EfficientNetB4,
'''b5''': efficientnet.EfficientNetB5,
'''b6''': efficientnet.EfficientNetB6,
'''b7''': efficientnet.EfficientNetB7,
}
lowerCAmelCase__ = {
'''b0''': {
'''hidden_dim''': 1_2_8_0,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 2_2_4,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1_2_8_0,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 2_4_0,
'''dropout_rate''': 0.2,
'''dw_padding''': [1_6],
},
'''b2''': {
'''hidden_dim''': 1_4_0_8,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 2_6_0,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 1_6],
},
'''b3''': {
'''hidden_dim''': 1_5_3_6,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 3_0_0,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 1_8],
},
'''b4''': {
'''hidden_dim''': 1_7_9_2,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 3_8_0,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2_0_4_8,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 4_5_6,
'''dropout_rate''': 0.4,
'''dw_padding''': [1_3, 2_7],
},
'''b6''': {
'''hidden_dim''': 2_3_0_4,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 5_2_8,
'''dropout_rate''': 0.5,
'''dw_padding''': [3_1],
},
'''b7''': {
'''hidden_dim''': 2_5_6_0,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 6_0_0,
'''dropout_rate''': 0.5,
'''dw_padding''': [1_8],
},
}
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : str = EfficientNetConfig()
lowercase__ : str = CONFIG_MAP[model_name]["hidden_dim"]
lowercase__ : Union[str, Any] = CONFIG_MAP[model_name]["width_coef"]
lowercase__ : List[Any] = CONFIG_MAP[model_name]["depth_coef"]
lowercase__ : Optional[int] = CONFIG_MAP[model_name]["image_size"]
lowercase__ : Tuple = CONFIG_MAP[model_name]["dropout_rate"]
lowercase__ : Dict = CONFIG_MAP[model_name]["dw_padding"]
lowercase__ : str = "huggingface/label-files"
lowercase__ : List[Any] = "imagenet-1k-id2label.json"
lowercase__ : Any = 1_000
lowercase__ : Any = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="dataset" ) , "r" ) )
lowercase__ : Optional[int] = {int(lowerCamelCase__ ): v for k, v in idalabel.items()}
lowercase__ : Tuple = idalabel
lowercase__ : Dict = {v: k for k, v in idalabel.items()}
return config
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : List[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowercase__ : Optional[int] = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw )
return im
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Any = CONFIG_MAP[model_name]["image_size"]
lowercase__ : List[str] = EfficientNetImageProcessor(
size={"height": size, "width": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=lowerCamelCase__ , )
return preprocessor
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Tuple = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )]
lowercase__ : List[str] = sorted(set(lowerCamelCase__ ) )
lowercase__ : Optional[int] = len(lowerCamelCase__ )
lowercase__ : Optional[int] = {b: str(i ) for b, i in zip(lowerCamelCase__ , range(lowerCamelCase__ ) )}
lowercase__ : Tuple = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
lowercase__ : Optional[Any] = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
lowercase__ : Any = {}
for item in rename_keys:
if item[0] in original_param_names:
lowercase__ : Optional[Any] = "efficientnet." + item[1]
lowercase__ : str = "classifier.weight"
lowercase__ : Optional[int] = "classifier.bias"
return key_mapping
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
for key, value in tf_params.items():
if "normalization" in key:
continue
lowercase__ : List[Any] = key_mapping[key]
if "_conv" in key and "kernel" in key:
lowercase__ : Optional[int] = torch.from_numpy(lowerCamelCase__ ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
lowercase__ : str = torch.from_numpy(lowerCamelCase__ ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
lowercase__ : Tuple = torch.from_numpy(np.transpose(lowerCamelCase__ ) )
else:
lowercase__ : Optional[Any] = torch.from_numpy(lowerCamelCase__ )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(lowerCamelCase__ )
@torch.no_grad()
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : str = model_classes[model_name](
include_top=lowerCamelCase__ , weights="imagenet" , input_tensor=lowerCamelCase__ , input_shape=lowerCamelCase__ , pooling=lowerCamelCase__ , classes=1_000 , classifier_activation="softmax" , )
lowercase__ : List[str] = original_model.trainable_variables
lowercase__ : Optional[int] = original_model.non_trainable_variables
lowercase__ : str = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
lowercase__ : List[Any] = param.numpy()
lowercase__ : Optional[Any] = list(tf_params.keys() )
# Load HuggingFace model
lowercase__ : List[Any] = get_efficientnet_config(lowerCamelCase__ )
lowercase__ : Union[str, Any] = EfficientNetForImageClassification(lowerCamelCase__ ).eval()
lowercase__ : List[Any] = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("Converting parameters..." )
lowercase__ : Optional[Any] = rename_keys(lowerCamelCase__ )
replace_params(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Initialize preprocessor and preprocess input image
lowercase__ : str = convert_image_processor(lowerCamelCase__ )
lowercase__ : int = preprocessor(images=prepare_img() , return_tensors="pt" )
# HF model inference
hf_model.eval()
with torch.no_grad():
lowercase__ : Optional[Any] = hf_model(**lowerCamelCase__ )
lowercase__ : Dict = outputs.logits.detach().numpy()
# Original model inference
lowercase__ : Tuple = False
lowercase__ : Dict = CONFIG_MAP[model_name]["image_size"]
lowercase__ : Any = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
lowercase__ : Dict = image.img_to_array(lowerCamelCase__ )
lowercase__ : Optional[int] = np.expand_dims(lowerCamelCase__ , axis=0 )
lowercase__ : Dict = original_model.predict(lowerCamelCase__ )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ), "The predicted logits are not the same."
print("Model outputs match!" )
if save_model:
# Create folder to save model
if not os.path.isdir(lowerCamelCase__ ):
os.mkdir(lowerCamelCase__ )
# Save converted model and image processor
hf_model.save_pretrained(lowerCamelCase__ )
preprocessor.save_pretrained(lowerCamelCase__ )
if push_to_hub:
# Push model and image processor to hub
print(F"""Pushing converted {model_name} to the hub...""" )
lowercase__ : List[Any] = F"""efficientnet-{model_name}"""
preprocessor.push_to_hub(lowerCamelCase__ )
hf_model.push_to_hub(lowerCamelCase__ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
lowerCAmelCase__ = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 496 | 1 |
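A sketch of the kernel-layout conversion in replace_params above: TensorFlow stores conv kernels as (H, W, C_in, C_out) while PyTorch expects (C_out, C_in, H, W), hence permute(3, 2, 0, 1). Shapes are illustrative:

import numpy as np
import torch

tf_kernel = np.random.rand(3, 3, 16, 32).astype(np.float32)  # (H, W, C_in, C_out)
pt_weight = torch.from_numpy(tf_kernel).permute(3, 2, 0, 1)  # -> (C_out, C_in, H, W)
assert pt_weight.shape == (32, 16, 3, 3)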
import unittest

from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")


@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)

    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)

    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
            "Hey there, how are you doing this fine day?",
            "This is a text with a trailing spaces followed by a dot .",
            "Häj sväjs lillebrör! =)",
            "Det är inget fel på Mr. Cool",
        ]

        # fmt: off
__UpperCAmelCase : Optional[int] = {"input_ids": [[6_34_23, 5, 68_11, 1_49_54, 2_82, 8_16, 38_21, 6_34_66, 6_34_25, 6_34_62, 18, 6_39_78, 6_78, 3_01, 13_20, 6_34_23, 6_34_55, 6_34_58, 18, 6_39_82, 42_46, 39_40, 19_01, 4_77_89, 55_47, 1_89_94], [1_96_30, 11_00, 6_34_46, 13_42, 6_33, 5_44, 44_88, 5_93, 51_02, 24_16, 6_34_95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [16_52, 4_28, 2_68, 19_36, 5_15, 2_68, 5_85_93, 2_24_13, 91_06, 5_46, 2_68, 3_32_13, 6_39_79, 6_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_51_30, 6_34_50, 9_24, 6_34_49, 22_49, 40_62, 15_58, 3_18, 6_35_04, 2_14_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_09, 3_77, 28_27, 25_59, 3_32, 65_75, 6_34_43, 2_68_01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
        expected_encoding = __UpperCAmelCase  # alias for the large expected-encoding literal kept above
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="AI-Sweden/gpt-sw3-126m", sequences=sequences
        )
| 715 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_a : Dict = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Dict = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[Any] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
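# --- Editor's note (addition, not from the original file) ---
# What the lazy-module pattern above buys: importing the package is cheap, and
# a heavy submodule is loaded only on first attribute access. Illustration kept
# in comments because this is an __init__ module:
#
#   from transformers.models.reformer import ReformerConfig  # pulls in configuration_reformer only
#   config = ReformerConfig()
#   from transformers.models.reformer import ReformerModel   # only now does modeling_reformer (and torch) load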
| 10 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the best reachable score for the player to move at this node.

    The game tree is implicit: the children of node ``i`` sit at ``2 * i`` and
    ``2 * i + 1``, and the leaves are the entries of ``scores``.
    """
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")

    if depth == height:
        return scores[node_index]

    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 159 |
"""simple docstring"""
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
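# --- Editor's usage sketch (addition, not from the original file) ---
# get_sum(i, j) answers inclusive range-sum queries in O(1) after the O(n)
# prefix pass; contains_sum(t) checks for any contiguous subarray summing to t.
if __name__ == "__main__":
    demo = PrefixSum([1, 2, 3, 4])
    assert demo.get_sum(0, 3) == 10  # 1 + 2 + 3 + 4
    assert demo.get_sum(1, 2) == 5  # 2 + 3
    assert demo.contains_sum(7)  # 3 + 4
    assert not demo.contains_sum(11)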
| 159 | 1 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 392 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
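# --- Editor's note (addition, not from the original file) ---
# What the hooks above enable, assuming the transformers test suite: the shared
# helper registers a --make-reports option, and the terminal-summary hook then
# writes per-run report files. Illustrative invocation (the flag name comes
# from transformers.testing_utils; treat it as an assumption elsewhere):
#
#   python -m pytest tests/ -k tokenization --make-reports=my_run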
| 392 | 1 |
"""simple docstring"""
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                # third argument (sigmoid=True) inferred from the pruning method's name
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pruning_method',
choices=['l0', 'magnitude', 'topK', 'sigmoied_threshold'],
type=str,
required=True,
help=(
'Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'
' sigmoied_threshold = Soft movement pruning)'
),
)
parser.add_argument(
'--threshold',
type=float,
required=False,
help=(
'For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'
'For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'
'Not needed for `l0`'
),
)
parser.add_argument(
'--model_name_or_path',
type=str,
required=True,
help='Folder containing the model that was previously fine-pruned',
)
parser.add_argument(
'--target_model_path',
default=None,
type=str,
required=False,
help='Folder containing the model that was previously fine-pruned',
)
    args = parser.parse_args()
main(args)
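# --- Editor's usage sketch (addition, not from the original script) ---
# Example invocation; the input path is illustrative, not from the source:
#
#   python bertarize.py \
#       --pruning_method sigmoied_threshold \
#       --threshold 0.1 \
#       --model_name_or_path ./serialization_dir/fine_pruned_model
#
# With --target_model_path omitted, main() writes the binarized weights next to
# the input folder under a "bertarized_" prefix, as implemented above.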
| 96 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    PNDMScheduler,
    StableDiffusionLDM3DPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class StableDiffusionLDM3DPipelineFastTests(unittest.TestCase):
    pipeline_class = StableDiffusionLDM3DPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=6,
            out_channels=6,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262]
        )
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236])

        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2

    def test_stable_diffusion_prompt_embeds(self):
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_1, depth_slice_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_slice_1[0, -3:, -1]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = ldm3d_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=ldm3d_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = ldm3d_pipe.text_encoder(text_inputs)[0]
        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_2, depth_slice_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_slice_2[0, -3:, -1]

        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4

    def test_stable_diffusion_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = ldm3d_pipe(**inputs, negative_prompt=negative_prompt)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217]
        )
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135])
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2


@slow
@require_torch_gpu
class StableDiffusionLDM3DPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d_stable_diffusion(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d")
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        depth_slice = rgb[0, -3:, -1].flatten()  # sliced from rgb, kept exactly as in the source

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)

        expected_slice_rgb = np.array(
            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706]
        )
        expected_slice_depth = np.array(
            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706]
        )
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3


@nightly
@require_torch_gpu
class StableDiffusionLDM3DPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746

        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3

    def test_ldm3d_v2(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d-4c").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.4194127
        expected_rgb_std = 0.35375586
        expected_depth_mean = 0.5638502
        expected_depth_std = 0.34686103

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
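# --- Editor's usage sketch (addition, not from the original test file) ---
# Minimal inference example for the pipeline exercised above, assuming
# diffusers with the Intel/ldm3d checkpoint on a CUDA device; saving via PIL is
# an editor assumption rather than something the tests do:
#
#   import torch
#   from diffusers import StableDiffusionLDM3DPipeline
#
#   pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d").to("cuda")
#   out = pipe("a photograph of an astronaut riding a horse", output_type="pil")
#   out.rgb[0].save("astronaut_rgb.png")
#   out.depth[0].save("astronaut_depth.png")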
| 530 | 0 |
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
| 697 |
'''simple docstring'''
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    faces_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(faces_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number

    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability
if __name__ == "__main__":
    print(f'''{solution() = }''')
| 697 | 1 |
'''simple docstring'''
from itertools import product
from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)

    return dst


if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
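# --- Editor's usage sketch (addition, not from the original file) ---
# The kernel generator can be inspected on its own. Note that with the
# 1 / (2 * pi * sigma) scaling used above the weights do not sum exactly to 1,
# which slightly darkens the filtered image; explicit renormalisation would be
# an editor's suggestion, e.g. kernel /= kernel.sum().
#
#   kernel = gen_gaussian_kernel(3, sigma=1)
#   print(kernel.shape)  # (3, 3)
#   print(kernel.sum())  # close to, but below, 1.0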
| 107 |
'''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = '''\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
'''
_DESCRIPTION = '''\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
'''
_KWARGS_DESCRIPTION = '''\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results["google_bleu"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results["google_bleu"], 2))
0.4
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
| 107 | 1 |
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 714 |
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves checkpoints whose filename encodes the validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True):
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
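# --- Editor's usage sketch (addition, not from the original file) ---
# How these helpers are typically wired into a PyTorch Lightning trainer in the
# seq2seq examples; the argument values are illustrative assumptions:
#
#   checkpoint = get_checkpoint_callback(output_dir="outputs", metric="rouge2")
#   early_stop = get_early_stopping_callback(metric="rouge2", patience=3)
#   trainer = pl.Trainer(callbacks=[Seq2SeqLoggingCallback(), checkpoint, early_stop])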
| 222 | 0 |
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    """simple docstring"""
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--albert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained ALBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
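# --- Editor's usage sketch (addition, not from the original script) ---
# Example invocation; the file names are illustrative assumptions:
#
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./albert_base/model.ckpt-best \
#       --albert_config_file ./albert_base/albert_config.json \
#       --pytorch_dump_path ./albert_base/pytorch_model.bin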
| 105 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50_257,
        n_positions=1_024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
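# --- Editor's usage sketch (addition, not from the original file) ---
# attribute_map lets generic code read the standard config names; both sides of
# each assertion resolve to the same underlying attribute:
if __name__ == "__main__":
    config = GPTBigCodeConfig(n_embd=1024, n_layer=20)
    assert config.hidden_size == config.n_embd == 1024
    assert config.num_hidden_layers == config.n_layer == 20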
| 653 | 0 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
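# --- Editor's usage sketch (addition, not from the original file) ---
# Task templates rename dataset columns to the canonical schema. Hedged example
# using the (since-deprecated) datasets task API; the dataset choice is an
# illustrative assumption:
#
#   from datasets import load_dataset
#   ds = load_dataset("cnn_dailymail", "3.0.0", split="train[:10]")
#   ds = ds.prepare_for_task("summarization")  # exposes "text" and "summary" columns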
| 710 |
"""simple docstring"""
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """Divide ``number_of_bytes`` into ``partitions`` contiguous byte ranges."""
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")

    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
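# --- Editor's usage sketch (addition, not from the original file) ---
# Splitting a 100-byte download across 4 partitions; the last range absorbs the
# remainder, matching the implementation above.
if __name__ == "__main__":
    print(allocation_num(100, 4))  # ['1-25', '26-50', '51-75', '76-100']
    print(allocation_num(10, 3))  # ['1-3', '4-6', '7-10']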
| 65 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}


class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 463 |
class Node:
    def __init__(self, data, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self):
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value):
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.previous = None
        node.next = None

    def is_empty(self):
        return self.head is None


def create_linked_list() -> None:
    # The original file carried a long doctest here; it was stripped by the
    # source and is not recoverable, so the stub is left empty.
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
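# --- Editor's usage sketch (addition, not from the original file) ---
# End-to-end exercise of the doubly linked list above.
if __name__ == "__main__":
    linked_list = LinkedList()
    for value in (1, 2, 3):
        linked_list.insert(value)  # 1 2 3
    linked_list.insert_at_position(2, 99)  # 1 99 2 3
    linked_list.delete_value(2)  # 1 99 3
    assert str(linked_list) == "1 99 3"
    assert 99 in linked_list and 2 not in linked_list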
| 463 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
# NOTE (editor): the original public names in this dummy-objects module were
# lost to renaming. TorchDummyObject1..13 and torch_dummy_function_1..7 are
# placeholders, and the from_config / from_pretrained classmethod names follow
# the usual dummy-object convention, inferred from the file's shape.
class TorchDummyObject1(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class TorchDummyObject2(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class TorchDummyObject3(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class TorchDummyObject4(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class TorchDummyObject5(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class TorchDummyObject6(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class TorchDummyObject7(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class TorchDummyObject8(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class TorchDummyObject9(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class TorchDummyObject10(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class TorchDummyObject11(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


def torch_dummy_function_1(*args, **kwargs):
    requires_backends(torch_dummy_function_1, ["torch"])


def torch_dummy_function_2(*args, **kwargs):
    requires_backends(torch_dummy_function_2, ["torch"])


def torch_dummy_function_3(*args, **kwargs):
    requires_backends(torch_dummy_function_3, ["torch"])


def torch_dummy_function_4(*args, **kwargs):
    requires_backends(torch_dummy_function_4, ["torch"])


def torch_dummy_function_5(*args, **kwargs):
    requires_backends(torch_dummy_function_5, ["torch"])


def torch_dummy_function_6(*args, **kwargs):
    requires_backends(torch_dummy_function_6, ["torch"])


def torch_dummy_function_7(*args, **kwargs):
    requires_backends(torch_dummy_function_7, ["torch"])


class TorchDummyObject12(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class TorchDummyObject13(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
"""simple docstring"""
__lowercase :int = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase ( metaclass=a ):
"""simple docstring"""
__lowercase :Optional[Any] = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase ( metaclass=a ):
"""simple docstring"""
__lowercase :Optional[Any] = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase ( metaclass=a ):
"""simple docstring"""
__lowercase :Any = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase ( metaclass=a ):
"""simple docstring"""
__lowercase :str = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase ( metaclass=a ):
"""simple docstring"""
__lowercase :Union[str, Any] = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase ( metaclass=a ):
"""simple docstring"""
__lowercase :List[Any] = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase ( metaclass=a ):
"""simple docstring"""
__lowercase :str = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase ( metaclass=a ):
"""simple docstring"""
__lowercase :int = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase ( metaclass=a ):
"""simple docstring"""
__lowercase :Optional[int] = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase ( metaclass=a ):
"""simple docstring"""
__lowercase :Optional[int] = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase ( metaclass=a ):
"""simple docstring"""
__lowercase :int = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase ( metaclass=a ):
"""simple docstring"""
__lowercase :Optional[Any] = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase ( metaclass=a ):
"""simple docstring"""
__lowercase :str = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase ( metaclass=a ):
"""simple docstring"""
__lowercase :int = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase ( metaclass=a ):
"""simple docstring"""
__lowercase :Optional[int] = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase ( metaclass=a ):
"""simple docstring"""
__lowercase :Optional[Any] = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase ( metaclass=a ):
"""simple docstring"""
__lowercase :Optional[int] = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase ( metaclass=a ):
"""simple docstring"""
__lowercase :Tuple = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase ( metaclass=a ):
"""simple docstring"""
__lowercase :str = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase ( metaclass=a ):
"""simple docstring"""
__lowercase :Dict = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase ( metaclass=a ):
"""simple docstring"""
__lowercase :Any = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase ( metaclass=a ):
"""simple docstring"""
__lowercase :Optional[int] = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase ( metaclass=a ):
"""simple docstring"""
__lowercase :Optional[int] = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase ( metaclass=a ):
"""simple docstring"""
__lowercase :List[str] = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase ( metaclass=a ):
"""simple docstring"""
__lowercase :Tuple = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase ( metaclass=a ):
"""simple docstring"""
__lowercase :Optional[Any] = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase ( metaclass=a ):
"""simple docstring"""
__lowercase :str = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase ( metaclass=a ):
"""simple docstring"""
__lowercase :List[Any] = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase ( metaclass=a ):
"""simple docstring"""
__lowercase :Optional[int] = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase ( metaclass=a ):
"""simple docstring"""
__lowercase :Any = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase ( metaclass=a ):
"""simple docstring"""
__lowercase :List[Any] = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase ( metaclass=a ):
"""simple docstring"""
__lowercase :Any = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase ( metaclass=a ):
"""simple docstring"""
__lowercase :int = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase ( metaclass=a ):
"""simple docstring"""
__lowercase :Optional[Any] = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase ( metaclass=a ):
"""simple docstring"""
__lowercase :Any = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase ( metaclass=a ):
"""simple docstring"""
__lowercase :List[str] = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase ( metaclass=a ):
"""simple docstring"""
__lowercase :str = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowerCAmelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
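# Usage sketch for the dummy objects above (illustrative; the real class names
# were stripped by the obfuscation, so `SomeTorchModel` is a hypothetical
# stand-in). Instantiating any of these placeholders without PyTorch installed
# fails fast with an import error:
#
#     model = SomeTorchModel()                        # -> ImportError asking for torch
#     model = SomeTorchModel.from_pretrained("...")   # same guard via the classmethods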
"""simple docstring"""
import heapq as hq
import math
from collections.abc import Iterator
class lowerCAmelCase :
"""simple docstring"""
def __init__( self , UpperCamelCase__ ) -> str:
'''simple docstring'''
lowerCamelCase_ = str(id_ )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = []
lowerCamelCase_ = {} # {vertex:distance}
def __lt__( self , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
return self.key < other.key
def __repr__( self ) -> Union[str, Any]:
'''simple docstring'''
return self.id
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
self.neighbors.append(UpperCamelCase__ )
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = weight
def lowerCamelCase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : Dict ):
# add the neighbors:
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , _lowerCamelCase )
graph[b - 1].add_edge(graph[a - 1] , _lowerCamelCase )
def lowerCamelCase_ ( _lowerCamelCase : list , _lowerCamelCase : Vertex ):
lowerCamelCase_ = []
for u in graph:
lowerCamelCase_ = math.inf
lowerCamelCase_ = None
lowerCamelCase_ = 0
lowerCamelCase_ = graph[:]
while q:
lowerCamelCase_ = min(_lowerCamelCase )
q.remove(_lowerCamelCase )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
lowerCamelCase_ = u
lowerCamelCase_ = u.edges[v.id]
for i in range(1 , len(_lowerCamelCase ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def lowerCamelCase_ ( _lowerCamelCase : list , _lowerCamelCase : Vertex ):
for u in graph:
lowerCamelCase_ = math.inf
lowerCamelCase_ = None
lowerCamelCase_ = 0
lowerCamelCase_ = list(_lowerCamelCase )
hq.heapify(_lowerCamelCase )
while h:
lowerCamelCase_ = hq.heappop(_lowerCamelCase )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
lowerCamelCase_ = u
lowerCamelCase_ = u.edges[v.id]
hq.heapify(_lowerCamelCase )
for i in range(1 , len(_lowerCamelCase ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def lowerCamelCase_ ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod() | 66 | 0 |
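# Worked example for the Prim implementations above (illustrative; the graph
# and its edge weights are invented for this sketch):
#
#     g = [Vertex(n) for n in range(5)]
#     connect(g, 1, 2, 15); connect(g, 1, 3, 12); connect(g, 2, 4, 13)
#     connect(g, 2, 5, 5);  connect(g, 3, 4, 6);  connect(g, 4, 5, 9)
#     prim(g, g[0])  # -> [(2, 5), (3, 1), (4, 3), (5, 4)], an MST of weight 32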
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class A :
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=[8, 16, 32, 64] , SCREAMING_SNAKE_CASE=[1, 1, 2, 1] , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE="relu" , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=["stage2", "stage3", "stage4"] , SCREAMING_SNAKE_CASE=[2, 3, 4] , SCREAMING_SNAKE_CASE=1 , ) -> int:
"""simple docstring"""
A : Optional[Any] = parent
A : List[Any] = batch_size
A : Dict = image_size
A : Tuple = num_channels
A : Dict = embeddings_size
A : Dict = hidden_sizes
A : List[Any] = depths
A : List[Any] = is_training
A : List[str] = use_labels
A : List[str] = hidden_act
A : Any = num_labels
A : str = scope
A : int = len(_snake_case )
A : Optional[int] = out_features
A : Any = out_indices
A : Tuple = num_groups
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A : Tuple = None
if self.use_labels:
A : str = ids_tensor([self.batch_size] , self.num_labels )
A : List[Any] = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
A : List[Any] = BitModel(config=_snake_case )
model.to(_snake_case )
model.eval()
A : Optional[int] = model(_snake_case )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
A : Any = self.num_labels
A : Union[str, Any] = BitForImageClassification(_snake_case )
model.to(_snake_case )
model.eval()
A : List[str] = model(_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
A : int = BitBackbone(config=_snake_case )
model.to(_snake_case )
model.eval()
A : Dict = model(_snake_case )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
A : Tuple = None
A : List[Any] = BitBackbone(config=_snake_case )
model.to(_snake_case )
model.eval()
A : Tuple = model(_snake_case )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : Tuple = self.prepare_config_and_inputs()
A : Tuple = config_and_inputs
A : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class A ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
__magic_name__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
__magic_name__ = (
{'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification}
if is_torch_available()
else {}
)
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
A : Optional[int] = BitModelTester(self )
A : Union[str, Any] = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
return
@unittest.skip(reason='''Bit does not output attentions''' )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
pass
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : List[Any] = model_class(_snake_case )
A : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A : str = [*signature.parameters.keys()]
A : Dict = ['pixel_values']
self.assertListEqual(arg_names[:1] , _snake_case )
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_snake_case )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : List[str] = model_class(config=_snake_case )
for name, module in model.named_modules():
if isinstance(_snake_case , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
def check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
A : Optional[Any] = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
A : Optional[int] = model(**self._prepare_for_class(_snake_case , _snake_case ) )
A : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A : Tuple = self.model_tester.num_stages
self.assertEqual(len(_snake_case ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A : Tuple = ['preactivation', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
A : Dict = layer_type
A : Dict = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A : Tuple = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
pass
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
@slow
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A : Tuple = BitModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def lowerCAmelCase_ ( ):
'''simple docstring'''
A : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class A ( unittest.TestCase ):
@cached_property
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A : Tuple = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_snake_case )
A : Any = self.default_image_processor
A : Tuple = prepare_img()
A : List[str] = image_processor(images=_snake_case , return_tensors='''pt''' ).to(_snake_case )
# forward pass
with torch.no_grad():
A : Union[str, Any] = model(**_snake_case )
# verify the logits
A : Optional[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _snake_case )
A : Optional[int] = torch.tensor([[-0.6_526, -0.5_263, -1.4_398]] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _snake_case , atol=1e-4 ) )
@require_torch
class A ( lowerCAmelCase_ , unittest.TestCase ):
__magic_name__ = (BitBackbone,) if is_torch_available() else ()
__magic_name__ = BitConfig
__magic_name__ = False
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
A : List[Any] = BitModelTester(self )
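# Usage sketch of the model family exercised by the tests above (illustrative;
# `pixel_values` is an assumed (B, 3, H, W) float tensor):
#
#     from transformers import BitConfig, BitModel
#     model = BitModel(BitConfig())
#     out = model(pixel_values)
#     out.last_hidden_state        # (B, last_hidden_size, H // 32, W // 32)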
def binary_xor(a: int, b: int) -> str:
    """Return the bitwise XOR of two non-negative ints as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
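# Worked example for binary_xor above (illustrative):
#   25 -> 011001 and 32 -> 100000 once zero-padded to a common width,
#   so binary_xor(25, 32) == "0b111001" (decimal 57).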
from typing import Dict, List, Optional, Type

from .. import config
from ..utils import logging
from .formatting import (
    ArrowFormatter,
    CustomFormatter,
    Formatter,
    PandasFormatter,
    PythonFormatter,
    TensorFormatter,
    format_table,
    query_table,
)
from .np_formatter import NumpyFormatter


logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """Register a Formatter class under a format type name and optional aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    """Register the error to raise when the formatter of an uninstalled backend is requested."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error


# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")

if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
    _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])

if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
    _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])

if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
    _register_unavailable_formatter(_jax_error, "jax", aliases=[])


def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """Resolve a format type alias to its canonical name, or return it unchanged."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Instantiate the Formatter registered under `format_type` (or one of its aliases)."""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
        )
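# Usage sketch for the registry above (illustrative):
#
#     get_formatter("np")      # alias resolves to a NumpyFormatter instance
#     get_formatter(None)      # plain-Python objects (the default format)
#     get_formatter("pt")      # raises the stored error if torch is missing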
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : str = (DDIMParallelScheduler,)
A : Any = (("eta", 0.0), ("num_inference_steps", 50))
def UpperCAmelCase ( self : Union[str, Any] , **lowerCAmelCase : Optional[int]) -> Dict:
"""simple docstring"""
lowercase__ = {
'num_train_timesteps': 10_00,
'beta_start': 0.00_01,
'beta_end': 0.02,
'beta_schedule': 'linear',
'clip_sample': True,
}
config.update(**lowerCAmelCase)
return config
def UpperCAmelCase ( self : int , **lowerCAmelCase : str) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(**lowerCAmelCase)
lowercase__ = scheduler_class(**lowerCAmelCase)
lowercase__, lowercase__ = 10, 0.0
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter
scheduler.set_timesteps(lowerCAmelCase)
for t in scheduler.timesteps:
lowercase__ = model(lowerCAmelCase , lowerCAmelCase)
lowercase__ = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase).prev_sample
return sample
def UpperCAmelCase ( self : Tuple) -> int:
"""simple docstring"""
for timesteps in [1_00, 5_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase)
def UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowerCAmelCase)
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(steps_offset=1)
lowercase__ = scheduler_class(**lowerCAmelCase)
scheduler.set_timesteps(5)
assert torch.equal(scheduler.timesteps , torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1]))
def UpperCAmelCase ( self : str) -> Tuple:
"""simple docstring"""
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=lowerCAmelCase , beta_end=lowerCAmelCase)
def UpperCAmelCase ( self : Optional[int]) -> str:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase)
def UpperCAmelCase ( self : List[Any]) -> str:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCAmelCase)
def UpperCAmelCase ( self : Optional[int]) -> int:
"""simple docstring"""
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=lowerCAmelCase)
def UpperCAmelCase ( self : Any) -> List[str]:
"""simple docstring"""
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> Optional[int]:
"""simple docstring"""
self.check_over_configs(thresholding=lowerCAmelCase)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=lowerCAmelCase , prediction_type=lowerCAmelCase , sample_max_value=lowerCAmelCase , )
def UpperCAmelCase ( self : int) -> Optional[Any]:
"""simple docstring"""
for t in [1, 10, 49]:
self.check_over_forward(time_step=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> int:
"""simple docstring"""
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 5_00]):
self.check_over_forward(time_step=lowerCAmelCase , num_inference_steps=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0]):
self.check_over_forward(time_step=lowerCAmelCase , eta=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**lowerCAmelCase)
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_20 , 4_00) - 0.1_47_71)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_80 , 9_60) - 0.3_24_60)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 , 4_86) - 0.0_09_79)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 , 9_98) - 0.02)) < 1E-5
def UpperCAmelCase ( self : Dict) -> Tuple:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**lowerCAmelCase)
lowercase__, lowercase__ = 10, 0.0
scheduler.set_timesteps(lowerCAmelCase)
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter
lowercase__ = self.dummy_sample_deter + 0.1
lowercase__ = self.dummy_sample_deter - 0.1
lowercase__ = samplea.shape[0]
lowercase__ = torch.stack([samplea, samplea, samplea] , dim=0)
lowercase__ = torch.arange(lowerCAmelCase)[0:3, None].repeat(1 , lowerCAmelCase)
lowercase__ = model(samples.flatten(0 , 1) , timesteps.flatten(0 , 1))
lowercase__ = scheduler.batch_step_no_noise(lowerCAmelCase , timesteps.flatten(0 , 1) , samples.flatten(0 , 1) , lowerCAmelCase)
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 11_47.79_04) < 1E-2
assert abs(result_mean.item() - 0.49_82) < 1E-3
def UpperCAmelCase ( self : Any) -> int:
"""simple docstring"""
lowercase__ = self.full_loop()
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 1_72.00_67) < 1E-2
assert abs(result_mean.item() - 0.22_39_67) < 1E-3
def UpperCAmelCase ( self : int) -> List[Any]:
"""simple docstring"""
lowercase__ = self.full_loop(prediction_type='v_prediction')
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 52.53_02) < 1E-2
assert abs(result_mean.item() - 0.06_84) < 1E-3
def UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
lowercase__ = self.full_loop(set_alpha_to_one=lowerCAmelCase , beta_start=0.01)
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 1_49.82_95) < 1E-2
assert abs(result_mean.item() - 0.19_51) < 1E-3
def UpperCAmelCase ( self : str) -> List[Any]:
"""simple docstring"""
lowercase__ = self.full_loop(set_alpha_to_one=lowerCAmelCase , beta_start=0.01)
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 1_49.07_84) < 1E-2
assert abs(result_mean.item() - 0.19_41) < 1E-3
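# Usage sketch of the scheduler under test (illustrative, outside the suite;
# `model_output`, `t` and `sample` are assumed tensors/timesteps):
#
#     from diffusers import DDIMParallelScheduler
#     sched = DDIMParallelScheduler(num_train_timesteps=1000, beta_schedule="linear")
#     sched.set_timesteps(10)
#     prev = sched.step(model_output, t, sample, eta=0.0).prev_sample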
import random
import unittest

import torch

from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
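# To run just this test module (illustrative command; the path is assumed):
#   python -m pytest tests/pipelines/deepfloyd_if/test_if_inpainting_superresolution.py -q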
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}


class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
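# Usage sketch (illustrative): the attribute_map exposes standard config names
# as aliases for the Transformer-XL-specific ones.
#
#     config = TransfoXLConfig(d_model=512, n_layer=12, n_head=8)
#     config.hidden_size         # -> 512 (alias of d_model)
#     config.num_hidden_layers   # -> 12  (alias of n_layer)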
from __future__ import absolute_import, division, print_function, unicode_literals

from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
    ROBERTA_INPUTS_DOCSTRING,
    ROBERTA_START_DOCSTRING,
    RobertaEmbeddings,
)

from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy


@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
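# Usage sketch (illustrative; shapes invented for the example):
#
#     config = RobertaConfig(num_labels=2)
#     model = DeeRobertaForSequenceClassification(config)
#     input_ids = torch.randint(0, config.vocab_size, (1, 16))
#     outputs = model(input_ids, labels=torch.tensor([1]))
#     loss, logits = outputs[0], outputs[1]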
def get_data(source_data: list) -> list:
    """Convert rows of raw data into per-column lists of floats."""
    data_lists: list = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list, weights: list) -> list:
    """Min-max normalise each column; weight 0 means lower is better, weight 1 means higher is better."""
    score_lists: list = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list) -> list:
    """Sum the per-column scores into one aggregate score per row."""
    final_scores: list = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list, weights: list) -> list:
    """Append a weighted proximity score to each row of source_data."""
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
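# Worked example of the min-max scoring above. For a column [20, 23, 22] with
# weight 0 ("lower is better"), min = 20 and max = 23, so each entry scores
# 1 - (x - 20) / (23 - 20): 20 -> 1.0, 23 -> 0.0, 22 -> 0.333...; with weight 1
# the complement is dropped and higher values score higher.
scores = [1 - (x - 20) / (23 - 20) for x in [20, 23, 22]]
assert [round(s, 3) for s in scores] == [1.0, 0.0, 0.333]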
| 472 | 1 |
"""simple docstring"""
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def lowercase__ ( snake_case_ :Any ):
__UpperCAmelCase = int(snake_case_ )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = t // 3_600, (t // 60) % 60, t % 60
return F'''{h}:{m:02d}:{s:02d}''' if h != 0 else F'''{m:02d}:{s:02d}'''
def lowercase__ ( snake_case_ :List[str] , snake_case_ :Optional[Any] , snake_case_ :Tuple , snake_case_ :List[Any] , snake_case_ :str=300 ):
# docstyle-ignore
return F'''
<div>
{prefix}
<progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
{label}
</div>
'''
def lowercase__ ( snake_case_ :Dict ):
__UpperCAmelCase = '''<table border="1" class="dataframe">\n'''
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += F''' <th>{i}</th>\n'''
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
__UpperCAmelCase = F'''{elt:.6f}''' if isinstance(snake_case_ , snake_case_ ) else str(snake_case_ )
html_code += F''' <td>{elt}</td>\n'''
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class _UpperCAmelCase :
a__ : str = 5
a__ : Any = 0.2
def __init__( self : List[Any] , _lowercase : int , _lowercase : Optional[str] = None , _lowercase : bool = True , _lowercase : Optional["NotebookTrainingTracker"] = None , _lowercase : int = 3_00 , ):
__UpperCAmelCase = total
__UpperCAmelCase = '''''' if prefix is None else prefix
__UpperCAmelCase = leave
__UpperCAmelCase = parent
__UpperCAmelCase = width
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
def a ( self : List[str] , _lowercase : int , _lowercase : bool = False , _lowercase : str = None ):
__UpperCAmelCase = value
if comment is not None:
__UpperCAmelCase = comment
if self.last_value is None:
__UpperCAmelCase = __UpperCAmelCase = time.time()
__UpperCAmelCase = __UpperCAmelCase = value
__UpperCAmelCase = __UpperCAmelCase = None
__UpperCAmelCase = self.warmup
__UpperCAmelCase = 1
self.update_bar(_lowercase )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
__UpperCAmelCase = time.time()
__UpperCAmelCase = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
__UpperCAmelCase = self.elapsed_time / (value - self.start_value)
else:
__UpperCAmelCase = None
if value >= self.total:
__UpperCAmelCase = self.total
__UpperCAmelCase = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
__UpperCAmelCase = self.average_time_per_item * (self.total - value)
self.update_bar(_lowercase )
__UpperCAmelCase = value
__UpperCAmelCase = current_time
if self.average_time_per_item is None:
__UpperCAmelCase = 1
else:
__UpperCAmelCase = max(int(self.update_every / self.average_time_per_item ) , 1 )
def a ( self : Tuple , _lowercase : Optional[int] , _lowercase : List[Any]=None ):
__UpperCAmelCase = ''' ''' * (len(str(self.total ) ) - len(str(_lowercase ) )) + str(_lowercase )
if self.elapsed_time is None:
__UpperCAmelCase = F'''[{spaced_value}/{self.total} : < :'''
elif self.predicted_remaining is None:
__UpperCAmelCase = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}'''
else:
__UpperCAmelCase = (
F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <'''
F''' {format_time(self.predicted_remaining )}'''
)
self.label += F''', {1/self.average_time_per_item:.2f} it/s'''
self.label += "]" if self.comment is None or len(self.comment ) == 0 else F''', {self.comment}]'''
self.display()
def a ( self : int ):
__UpperCAmelCase = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
__UpperCAmelCase = disp.display(disp.HTML(self.html_code ) , display_id=_lowercase )
else:
self.output.update(disp.HTML(self.html_code ) )
def a ( self : Any ):
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('''''' ) )
class _UpperCAmelCase ( _lowerCAmelCase ):
def __init__( self : Optional[Any] , _lowercase : List[str] , _lowercase : str=None ):
super().__init__(_lowercase )
__UpperCAmelCase = None if column_names is None else [column_names]
__UpperCAmelCase = None
def a ( self : List[str] ):
__UpperCAmelCase = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
__UpperCAmelCase = disp.display(disp.HTML(self.html_code ) , display_id=_lowercase )
else:
self.output.update(disp.HTML(self.html_code ) )
def a ( self : str , _lowercase : int ):
if self.inner_table is None:
__UpperCAmelCase = [list(values.keys() ), list(values.values() )]
else:
__UpperCAmelCase = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(_lowercase )
__UpperCAmelCase = columns
self.inner_table.append([values[c] for c in columns] )
def a ( self : List[str] , _lowercase : List[str] , _lowercase : str=None , _lowercase : int=3_00 ):
__UpperCAmelCase = NotebookProgressBar(_lowercase , prefix=_lowercase , parent=self , width=_lowercase )
return self.child_bar
def a ( self : Optional[int] ):
__UpperCAmelCase = None
self.display()
class _UpperCAmelCase ( _lowerCAmelCase ):
def __init__( self : List[Any] ):
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = False
def a ( self : Tuple , _lowercase : List[Any] , _lowercase : Any , _lowercase : Union[str, Any] , **_lowercase : str ):
__UpperCAmelCase = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step'''
__UpperCAmelCase = 0
__UpperCAmelCase = 0
__UpperCAmelCase = [self.first_column] + ['''Training Loss''']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('''Validation Loss''' )
__UpperCAmelCase = NotebookTrainingTracker(state.max_steps , _lowercase )
def a ( self : List[str] , _lowercase : Union[str, Any] , _lowercase : Tuple , _lowercase : Tuple , **_lowercase : str ):
__UpperCAmelCase = int(state.epoch ) if int(state.epoch ) == state.epoch else F'''{state.epoch:.2f}'''
self.training_tracker.update(
state.global_step + 1 , comment=F'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , )
__UpperCAmelCase = False
def a ( self : Any , _lowercase : List[Any] , _lowercase : List[Any] , _lowercase : int , _lowercase : Union[str, Any]=None , **_lowercase : str ):
if not has_length(_lowercase ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
__UpperCAmelCase = self.training_tracker.add_child(len(_lowercase ) )
else:
__UpperCAmelCase = NotebookProgressBar(len(_lowercase ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def a ( self : List[str] , _lowercase : List[str] , _lowercase : List[str] , _lowercase : Optional[int] , **_lowercase : Dict ):
if self.prediction_bar is not None:
self.prediction_bar.close()
__UpperCAmelCase = None
def a ( self : List[str] , _lowercase : int , _lowercase : Tuple , _lowercase : List[str] , _lowercase : int=None , **_lowercase : List[str] ):
# Only for when there is no evaluation
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
__UpperCAmelCase = {'''Training Loss''': logs['''loss''']}
            # First column is necessarily Step since we're not in epoch eval strategy
__UpperCAmelCase = state.global_step
self.training_tracker.write_line(_lowercase )
def a ( self : List[Any] , _lowercase : Optional[Any] , _lowercase : Tuple , _lowercase : str , _lowercase : int=None , **_lowercase : Union[str, Any] ):
if self.training_tracker is not None:
__UpperCAmelCase = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''}
for log in reversed(state.log_history ):
if "loss" in log:
__UpperCAmelCase = log['''loss''']
break
if self.first_column == "Epoch":
__UpperCAmelCase = int(state.epoch )
else:
__UpperCAmelCase = state.global_step
__UpperCAmelCase = '''eval'''
for k in metrics:
if k.endswith('''_loss''' ):
__UpperCAmelCase = re.sub(r'''\_loss$''' , '''''' , _lowercase )
__UpperCAmelCase = metrics.pop('''total_flos''' , _lowercase )
__UpperCAmelCase = metrics.pop('''epoch''' , _lowercase )
__UpperCAmelCase = metrics.pop(F'''{metric_key_prefix}_runtime''' , _lowercase )
__UpperCAmelCase = metrics.pop(F'''{metric_key_prefix}_samples_per_second''' , _lowercase )
__UpperCAmelCase = metrics.pop(F'''{metric_key_prefix}_steps_per_second''' , _lowercase )
__UpperCAmelCase = metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''' , _lowercase )
for k, v in metrics.items():
if k == F'''{metric_key_prefix}_loss''':
__UpperCAmelCase = v
else:
__UpperCAmelCase = k.split('''_''' )
__UpperCAmelCase = ''' '''.join([part.capitalize() for part in splits[1:]] )
__UpperCAmelCase = v
self.training_tracker.write_line(_lowercase )
self.training_tracker.remove_child()
__UpperCAmelCase = None
# Evaluation takes a long time so we should force the next update.
__UpperCAmelCase = True
def a ( self : Tuple , _lowercase : Optional[int] , _lowercase : Dict , _lowercase : List[Any] , **_lowercase : List[str] ):
self.training_tracker.update(
state.global_step , comment=F'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' , force_update=_lowercase )
__UpperCAmelCase = None
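# Standalone restatement (for illustration only) of the time-format convention
# used by the helper above: below one hour the output is MM:SS, otherwise H:MM:SS.
def fmt(t: int) -> str:
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"

assert fmt(75) == "01:15"
assert fmt(3725) == "1:02:05"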
| 49 |
from math import factorial
def solution(n: int = 20) -> int:
    """Return the number of lattice paths through an n x n grid."""
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number.''')
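# Worked check of the formula above: a 20 x 20 grid has
# C(40, 20) = 40! / (20! * 20!) = 137846528820 lattice paths.
from math import comb
assert comb(40, 20) == 137846528820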
| 339 | 0 |
'''simple docstring'''
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
_lowerCAmelCase = 2
class lowerCAmelCase_:
'''simple docstring'''
def __init__( self ,*, # begin keyword-only arguments
__UpperCAmelCase="<s>" ,__UpperCAmelCase="<pad>" ,__UpperCAmelCase="</s>" ,__UpperCAmelCase="<unk>" ,__UpperCAmelCase=None ,) -> Optional[int]:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = bos, unk, pad, eos
lowerCAmelCase__ : Any = []
lowerCAmelCase__ : Dict = []
lowerCAmelCase__ : List[str] = {}
lowerCAmelCase__ : List[Any] = self.add_symbol(__UpperCAmelCase )
lowerCAmelCase__ : int = self.add_symbol(__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = self.add_symbol(__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = self.add_symbol(__UpperCAmelCase )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(__UpperCAmelCase )
lowerCAmelCase__ : int = len(self.symbols )
def __eq__( self ,__UpperCAmelCase ) -> Optional[int]:
return self.indices == other.indices
def __getitem__( self ,__UpperCAmelCase ) -> Tuple:
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self ) -> Optional[Any]:
return len(self.symbols )
def __contains__( self ,__UpperCAmelCase ) -> int:
return sym in self.indices
@classmethod
def UpperCAmelCase_ ( cls ,__UpperCAmelCase ) -> str:
lowerCAmelCase__ : Dict = cls()
d.add_from_file(__UpperCAmelCase )
return d
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase=1 ,__UpperCAmelCase=False ) -> Dict:
if word in self.indices and not overwrite:
lowerCAmelCase__ : int = self.indices[word]
lowerCAmelCase__ : str = self.count[idx] + n
return idx
else:
lowerCAmelCase__ : Any = len(self.symbols )
lowerCAmelCase__ : Union[str, Any] = idx
self.symbols.append(__UpperCAmelCase )
self.count.append(__UpperCAmelCase )
return idx
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> str:
return 0
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Optional[Any]:
if isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
try:
with open(__UpperCAmelCase ,"""r""" ,encoding="""utf-8""" ) as fd:
self.add_from_file(__UpperCAmelCase )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception("""Incorrect encoding detected in {}, please rebuild the dataset""".format(__UpperCAmelCase ) )
return
lowerCAmelCase__ : Optional[int] = f.readlines()
lowerCAmelCase__ : int = self._load_meta(__UpperCAmelCase )
for line in lines[indices_start_line:]:
try:
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = line.rstrip().rsplit(""" """ ,1 )
if field == "#fairseq:overwrite":
lowerCAmelCase__ : Any = True
lowerCAmelCase__ , lowerCAmelCase__ : str = line.rsplit(""" """ ,1 )
else:
lowerCAmelCase__ : Tuple = False
lowerCAmelCase__ : Union[str, Any] = int(__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = line
if word in self and not overwrite:
raise RuntimeError(
"""Duplicate word found when loading Dictionary: '{}'. """
"""Duplicate words can overwrite earlier ones by adding the """
"""#fairseq:overwrite flag at the end of the corresponding row """
"""in the dictionary file. If using the Camembert model, please """
"""download an updated copy of the model file.""".format(__UpperCAmelCase ) )
self.add_symbol(__UpperCAmelCase ,n=__UpperCAmelCase ,overwrite=__UpperCAmelCase )
except ValueError:
raise ValueError("""Incorrect dictionary format, expected '<token> <cnt> [flags]'""" )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = dict((re.sub(R"""@@$""" , """""" , UpperCamelCase ), v) if k.endswith("""@@""" ) else (re.sub(R"""$""" , """</w>""" , UpperCamelCase ), v) for k, v in d.items() )
lowerCAmelCase__ : Optional[Any] = """<s> <pad> </s> <unk>""".split()
# restore the special tokens
for k in keep_keys:
del da[f"""{k}</w>"""]
lowerCAmelCase__ : Optional[Any] = d[k] # restore
return da
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
if not os.path.exists(UpperCamelCase ):
raise ValueError(f"""path {biogpt_checkpoint_path} does not exist!""" )
os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase )
print(f"""Writing results to {pytorch_dump_folder_path}""" )
# handle various types of models
lowerCAmelCase__ : List[str] = os.path.join(UpperCamelCase , """checkpoint.pt""" )
if not os.path.isfile(UpperCamelCase ):
raise ValueError(f"""path to the file {checkpoint_file} does not exist!""" )
lowerCAmelCase__ : List[Any] = torch.load(UpperCamelCase , map_location="""cpu""" )
lowerCAmelCase__ : Optional[int] = chkpt["""cfg"""]["""model"""]
# dicts
lowerCAmelCase__ : Tuple = os.path.join(UpperCamelCase , """dict.txt""" )
if not os.path.isfile(UpperCamelCase ):
raise ValueError(f"""path to the file {dict_file} does not exist!""" )
lowerCAmelCase__ : Any = Dictionary.load(UpperCamelCase )
lowerCAmelCase__ : List[str] = rewrite_dict_keys(src_dict.indices )
lowerCAmelCase__ : Tuple = len(UpperCamelCase )
lowerCAmelCase__ : int = os.path.join(UpperCamelCase , VOCAB_FILES_NAMES["""vocab_file"""] )
print(f"""Generating {src_vocab_file} of {src_vocab_size} records""" )
with open(UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(UpperCamelCase , ensure_ascii=UpperCamelCase , indent=UpperCamelCase ) )
# merges_file (bpecodes)
lowerCAmelCase__ : List[Any] = os.path.join(UpperCamelCase , """bpecodes""" )
if not os.path.isfile(UpperCamelCase ):
raise ValueError(f"""path to the file {bpecodes_file} does not exist!""" )
lowerCAmelCase__ : Dict = os.path.join(UpperCamelCase , VOCAB_FILES_NAMES["""merges_file"""] )
shutil.copyfile(UpperCamelCase , UpperCamelCase )
# model config
lowerCAmelCase__ : List[str] = os.path.join(UpperCamelCase , """config.json""" )
lowerCAmelCase__ : Union[str, Any] = {
"""activation_dropout""": args["""activation_dropout"""],
"""architectures""": ["""BioGptForCausalLM"""],
"""attention_probs_dropout_prob""": args["""attention_dropout"""],
"""bos_token_id""": 0,
"""eos_token_id""": 2,
"""hidden_act""": args["""activation_fn"""],
"""hidden_dropout_prob""": args["""dropout"""],
"""hidden_size""": args["""decoder_embed_dim"""],
"""initializer_range""": 0.02,
"""intermediate_size""": args["""decoder_ffn_embed_dim"""],
"""layer_norm_eps""": 1e-12,
"""layerdrop""": args["""decoder_layerdrop"""],
"""max_position_embeddings""": args["""max_target_positions"""],
"""model_type""": """biogpt""",
"""num_attention_heads""": args["""decoder_attention_heads"""],
"""num_hidden_layers""": args["""decoder_layers"""],
"""pad_token_id""": 1,
"""scale_embedding""": not args["""no_scale_embedding"""],
"""tie_word_embeddings""": args["""share_decoder_input_output_embed"""],
"""vocab_size""": src_vocab_size,
}
# good hparam defaults to start with
print(f"""Generating {biogpt_model_config_file}""" )
with open(UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(UpperCamelCase , ensure_ascii=UpperCamelCase , indent=UpperCamelCase ) )
# tokenizer config
lowerCAmelCase__ : str = os.path.join(UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : str = {
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
"""model_max_length""": 1024,
"""pad_token""": """<pad>""",
"""special_tokens_map_file""": None,
"""tokenizer_class""": """BioGptTokenizer""",
"""unk_token""": """<unk>""",
}
print(f"""Generating {biogpt_tokenizer_config_file}""" )
with open(UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(UpperCamelCase , ensure_ascii=UpperCamelCase , indent=UpperCamelCase ) )
# model
lowerCAmelCase__ : int = chkpt["""model"""]
# remove unneeded keys
lowerCAmelCase__ : Dict = [
"""decoder.version""",
]
for k in ignore_keys:
model_state_dict.pop(UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith("""output_projection.weight""" ):
lowerCAmelCase__ : Dict = model_state_dict.pop(UpperCamelCase )
else:
lowerCAmelCase__ : Union[str, Any] = model_state_dict.pop(UpperCamelCase )
lowerCAmelCase__ : str = BioGptConfig.from_pretrained(UpperCamelCase )
lowerCAmelCase__ : Optional[int] = BioGptForCausalLM(UpperCamelCase )
# check that it loads ok
model_new.load_state_dict(UpperCamelCase )
# save
lowerCAmelCase__ : Optional[int] = os.path.join(UpperCamelCase , UpperCamelCase )
print(f"""Generating {pytorch_weights_dump_path}""" )
torch.save(UpperCamelCase , UpperCamelCase )
print("""Conversion is done!""" )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--biogpt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_lowerCAmelCase = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
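# Standalone check (illustrative only) of the BPE key rewriting performed by
# the dictionary-rewriting helper above: fairseq marks word-internal pieces
# with a trailing "@@", while the converted vocab marks word-final pieces
# with "</w>".
import re

def rewrite(d: dict) -> dict:
    return dict(
        (re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v)
        for k, v in d.items()
    )

assert rewrite({"le@@": 5, "tt@@": 6, "er": 7}) == {"le": 5, "tt": 6, "er</w>": 7}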
| 160 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
'''simple docstring'''
__lowercase : List[str] = IFImgaImgSuperResolutionPipeline
__lowercase : int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
__lowercase : Dict = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
__lowercase : Any = PipelineTesterMixin.required_optional_params - {'''latents'''}
def UpperCAmelCase_ ( self ) -> List[Any]:
return self._get_superresolution_dummy_components()
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase=0 ) -> List[str]:
if str(__UpperCAmelCase ).startswith("""mps""" ):
lowerCAmelCase__ : Any = torch.manual_seed(__UpperCAmelCase )
else:
lowerCAmelCase__ : Union[str, Any] = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
lowerCAmelCase__ : Any = floats_tensor((1, 3, 32, 32) ,rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = floats_tensor((1, 3, 16, 16) ,rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() ,reason="""XFormers attention is only available with CUDA and `xformers` installed""" ,)
def UpperCAmelCase_ ( self ) -> Optional[int]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def UpperCAmelCase_ ( self ) -> int:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" ,reason="""float16 requires CUDA""" )
def UpperCAmelCase_ ( self ) -> List[str]:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1E-1 )
def UpperCAmelCase_ ( self ) -> List[str]:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def UpperCAmelCase_ ( self ) -> Optional[int]:
self._test_save_load_local()
def UpperCAmelCase_ ( self ) -> Any:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 ,)
| 160 | 1 |
'''simple docstring'''
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
lowerCAmelCase : Union[str, Any] =2
class a_ :
def __init__( self : int , *, # begin keyword-only arguments
lowercase : str="<s>" , lowercase : List[str]="<pad>" , lowercase : str="</s>" , lowercase : str="<unk>" , lowercase : List[Any]=None , ):
"""simple docstring"""
lowercase_ , lowercase_ , lowercase_ , lowercase_ :Dict = bos, unk, pad, eos
lowercase_ :str = []
lowercase_ :Optional[int] = []
lowercase_ :Union[str, Any] = {}
lowercase_ :Any = self.add_symbol(lowercase )
lowercase_ :List[Any] = self.add_symbol(lowercase )
lowercase_ :Optional[int] = self.add_symbol(lowercase )
lowercase_ :int = self.add_symbol(lowercase )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(lowercase )
lowercase_ :Optional[Any] = len(self.symbols )
def __eq__( self : str , lowercase : Dict ):
"""simple docstring"""
return self.indices == other.indices
def __getitem__( self : Optional[int] , lowercase : Tuple ):
"""simple docstring"""
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self : Optional[Any] ):
"""simple docstring"""
return len(self.symbols )
def __contains__( self : List[str] , lowercase : Union[str, Any] ):
"""simple docstring"""
return sym in self.indices
@classmethod
def lowercase__ ( cls : Optional[int] , lowercase : Union[str, Any] ):
"""simple docstring"""
lowercase_ :str = cls()
d.add_from_file(lowercase )
return d
def lowercase__ ( self : Dict , lowercase : Dict , lowercase : Any=1 , lowercase : str=False ):
"""simple docstring"""
if word in self.indices and not overwrite:
lowercase_ :Optional[int] = self.indices[word]
lowercase_ :int = self.count[idx] + n
return idx
else:
lowercase_ :int = len(self.symbols )
lowercase_ :List[Any] = idx
self.symbols.append(lowercase )
self.count.append(lowercase )
return idx
def lowercase__ ( self : List[str] , lowercase : Dict ):
"""simple docstring"""
return 0
def lowercase__ ( self : int , lowercase : List[Any] ):
"""simple docstring"""
if isinstance(lowercase , lowercase ):
try:
with open(lowercase , "r" , encoding="utf-8" ) as fd:
self.add_from_file(lowercase )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(lowercase ) )
return
lowercase_ :Optional[int] = f.readlines()
lowercase_ :Union[str, Any] = self._load_meta(lowercase )
for line in lines[indices_start_line:]:
try:
lowercase_ , lowercase_ :List[Any] = line.rstrip().rsplit(" " , 1 )
if field == "#fairseq:overwrite":
lowercase_ :Optional[int] = True
lowercase_ , lowercase_ :List[Any] = line.rsplit(" " , 1 )
else:
lowercase_ :str = False
lowercase_ :str = int(lowercase )
lowercase_ :Optional[Any] = line
if word in self and not overwrite:
raise RuntimeError(
"Duplicate word found when loading Dictionary: '{}'. "
"Duplicate words can overwrite earlier ones by adding the "
"#fairseq:overwrite flag at the end of the corresponding row "
"in the dictionary file. If using the Camembert model, please "
"download an updated copy of the model file.".format(lowercase ) )
self.add_symbol(lowercase , n=lowercase , overwrite=lowercase )
except ValueError:
raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'" )
def UpperCAmelCase_ ( __lowerCamelCase : Any ):
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
lowercase_ :Dict = dict((re.sub(r"@@$" ,"" ,__lowerCamelCase ), v) if k.endswith("@@" ) else (re.sub(r"$" ,"</w>" ,__lowerCamelCase ), v) for k, v in d.items() )
lowercase_ :Any = "<s> <pad> </s> <unk>".split()
# restore the special tokens
for k in keep_keys:
del da[F'{k}</w>']
lowercase_ :str = d[k] # restore
return da
def UpperCAmelCase_ ( __lowerCamelCase : Union[str, Any] ,__lowerCamelCase : Optional[int] ):
# prep
if not os.path.exists(__lowerCamelCase ):
raise ValueError(F'path {biogpt_checkpoint_path} does not exist!' )
os.makedirs(__lowerCamelCase ,exist_ok=__lowerCamelCase )
print(F'Writing results to {pytorch_dump_folder_path}' )
# handle various types of models
lowercase_ :Dict = os.path.join(__lowerCamelCase ,"checkpoint.pt" )
if not os.path.isfile(__lowerCamelCase ):
raise ValueError(F'path to the file {checkpoint_file} does not exist!' )
lowercase_ :str = torch.load(__lowerCamelCase ,map_location="cpu" )
lowercase_ :Optional[int] = chkpt["cfg"]["model"]
# dicts
lowercase_ :str = os.path.join(__lowerCamelCase ,"dict.txt" )
if not os.path.isfile(__lowerCamelCase ):
raise ValueError(F'path to the file {dict_file} does not exist!' )
lowercase_ :Tuple = Dictionary.load(__lowerCamelCase )
lowercase_ :int = rewrite_dict_keys(src_dict.indices )
lowercase_ :List[Any] = len(__lowerCamelCase )
lowercase_ :Tuple = os.path.join(__lowerCamelCase ,VOCAB_FILES_NAMES["vocab_file"] )
print(F'Generating {src_vocab_file} of {src_vocab_size} records' )
with open(__lowerCamelCase ,"w" ,encoding="utf-8" ) as f:
f.write(json.dumps(__lowerCamelCase ,ensure_ascii=__lowerCamelCase ,indent=__lowerCamelCase ) )
# merges_file (bpecodes)
lowercase_ :Dict = os.path.join(__lowerCamelCase ,"bpecodes" )
if not os.path.isfile(__lowerCamelCase ):
raise ValueError(F'path to the file {bpecodes_file} does not exist!' )
lowercase_ :List[str] = os.path.join(__lowerCamelCase ,VOCAB_FILES_NAMES["merges_file"] )
shutil.copyfile(__lowerCamelCase ,__lowerCamelCase )
# model config
lowercase_ :Optional[Any] = os.path.join(__lowerCamelCase ,"config.json" )
lowercase_ :Union[str, Any] = {
"activation_dropout": args["activation_dropout"],
"architectures": ["BioGptForCausalLM"],
"attention_probs_dropout_prob": args["attention_dropout"],
"bos_token_id": 0,
"eos_token_id": 2,
"hidden_act": args["activation_fn"],
"hidden_dropout_prob": args["dropout"],
"hidden_size": args["decoder_embed_dim"],
"initializer_range": 0.02,
"intermediate_size": args["decoder_ffn_embed_dim"],
"layer_norm_eps": 1e-12,
"layerdrop": args["decoder_layerdrop"],
"max_position_embeddings": args["max_target_positions"],
"model_type": "biogpt",
"num_attention_heads": args["decoder_attention_heads"],
"num_hidden_layers": args["decoder_layers"],
"pad_token_id": 1,
"scale_embedding": not args["no_scale_embedding"],
"tie_word_embeddings": args["share_decoder_input_output_embed"],
"vocab_size": src_vocab_size,
}
# good hparam defaults to start with
print(F'Generating {biogpt_model_config_file}' )
with open(__lowerCamelCase ,"w" ,encoding="utf-8" ) as f:
f.write(json.dumps(__lowerCamelCase ,ensure_ascii=__lowerCamelCase ,indent=__lowerCamelCase ) )
# tokenizer config
lowercase_ :Dict = os.path.join(__lowerCamelCase ,__lowerCamelCase )
lowercase_ :List[str] = {
"bos_token": "<s>",
"eos_token": "</s>",
"model_max_length": 10_24,
"pad_token": "<pad>",
"special_tokens_map_file": None,
"tokenizer_class": "BioGptTokenizer",
"unk_token": "<unk>",
}
print(F'Generating {biogpt_tokenizer_config_file}' )
with open(__lowerCamelCase ,"w" ,encoding="utf-8" ) as f:
f.write(json.dumps(__lowerCamelCase ,ensure_ascii=__lowerCamelCase ,indent=__lowerCamelCase ) )
# model
lowercase_ :str = chkpt["model"]
# remove unneeded keys
lowercase_ :str = [
"decoder.version",
]
for k in ignore_keys:
model_state_dict.pop(__lowerCamelCase ,__lowerCamelCase )
lowercase_ :Optional[int] = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith("output_projection.weight" ):
lowercase_ :List[Any] = model_state_dict.pop(__lowerCamelCase )
else:
lowercase_ :List[Any] = model_state_dict.pop(__lowerCamelCase )
lowercase_ :int = BioGptConfig.from_pretrained(__lowerCamelCase )
lowercase_ :Union[str, Any] = BioGptForCausalLM(__lowerCamelCase )
# check that it loads ok
model_new.load_state_dict(__lowerCamelCase )
# save
lowercase_ :int = os.path.join(__lowerCamelCase ,__lowerCamelCase )
print(F'Generating {pytorch_weights_dump_path}' )
torch.save(__lowerCamelCase ,__lowerCamelCase )
print("Conversion is done!" )
if __name__ == "__main__":
lowerCAmelCase : List[str] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--biogpt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase : List[str] =parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 172 |
'''simple docstring'''
from typing import List
import numpy as np
def UpperCAmelCase_ ( __lowerCamelCase : dict ):
lowercase_ :Dict = {key: len(__lowerCamelCase ) for key, value in gen_kwargs.items() if isinstance(__lowerCamelCase ,__lowerCamelCase )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
"Sharding is ambiguous for this dataset: "
+ "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
+ "\n".join(F'\t- key {key} has length {length}' for key, length in lists_lengths.items() )
+ "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
+ "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
) )
lowercase_ :Any = max(lists_lengths.values() ,default=0 )
return max(1 ,__lowerCamelCase )
def UpperCAmelCase_ ( __lowerCamelCase : int ,__lowerCamelCase : int ):
lowercase_ :Tuple = []
for group_idx in range(__lowerCamelCase ):
lowercase_ :Any = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
lowercase_ :Optional[Any] = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
lowercase_ :List[str] = range(__lowerCamelCase ,start + num_shards_to_add )
shards_indices_per_group.append(__lowerCamelCase )
return shards_indices_per_group
def UpperCAmelCase_ ( __lowerCamelCase : dict ,__lowerCamelCase : int ):
lowercase_ :Dict = _number_of_shards_in_gen_kwargs(__lowerCamelCase )
if num_shards == 1:
return [dict(__lowerCamelCase )]
else:
lowercase_ :Optional[Any] = _distribute_shards(num_shards=__lowerCamelCase ,max_num_jobs=__lowerCamelCase )
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(__lowerCamelCase ,__lowerCamelCase )
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(__lowerCamelCase ) )
]
def UpperCAmelCase_ ( __lowerCamelCase : List[dict] ):
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key] ,__lowerCamelCase )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def UpperCAmelCase_ ( __lowerCamelCase : np.random.Generator ,__lowerCamelCase : dict ):
lowercase_ :Tuple = {len(__lowerCamelCase ) for value in gen_kwargs.values() if isinstance(__lowerCamelCase ,__lowerCamelCase )}
lowercase_ :Optional[Any] = {}
for size in list_sizes:
lowercase_ :int = list(range(__lowerCamelCase ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
lowercase_ :List[Any] = dict(__lowerCamelCase )
for key, value in shuffled_kwargs.items():
if isinstance(__lowerCamelCase ,__lowerCamelCase ):
lowercase_ :List[str] = [value[i] for i in indices_per_size[len(__lowerCamelCase )]]
return shuffled_kwargs
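# Worked example of the shard-distribution logic above: 5 shards over 2 jobs
# yields contiguous groups of sizes 3 and 2 (the remainder goes to the earliest
# groups). The function name here is an illustrative stand-in.
def distribute(num_shards: int, max_num_jobs: int) -> list:
    groups = []
    for group_idx in range(max_num_jobs):
        add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if add == 0:
            break
        start = groups[-1].stop if groups else 0
        groups.append(range(start, start + add))
    return groups

assert distribute(5, 2) == [range(0, 3), range(3, 5)]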
| 172 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
SCREAMING_SNAKE_CASE_ : Dict = None
SCREAMING_SNAKE_CASE_ : int = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ : int = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
SCREAMING_SNAKE_CASE_ : Optional[int] = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
SCREAMING_SNAKE_CASE_ : Any = {
'''facebook/nllb-large-en-ro''': 1024,
'''facebook/nllb-200-distilled-600M''': 1024,
}
# fmt: off
SCREAMING_SNAKE_CASE_ : Optional[int] = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class _A ( __a ):
__a = VOCAB_FILES_NAMES
__a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a = PRETRAINED_VOCAB_FILES_MAP
__a = ['input_ids', 'attention_mask']
__a = NllbTokenizer
__a = []
__a = []
def __init__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__="<pad>" , SCREAMING_SNAKE_CASE__="<mask>" , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=False , **SCREAMING_SNAKE_CASE__ , ) -> Any:
        # Mask token behaves like a normal word, i.e. it includes the space before it
lowerCamelCase__ = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else mask_token
lowerCamelCase__ = legacy_behaviour
super().__init__(
vocab_file=SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , src_lang=SCREAMING_SNAKE_CASE__ , tgt_lang=SCREAMING_SNAKE_CASE__ , additional_special_tokens=SCREAMING_SNAKE_CASE__ , legacy_behaviour=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = vocab_file
lowerCamelCase__ = False if not self.vocab_file else True
lowerCamelCase__ = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
lowerCamelCase__ = {
lang_code: self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowerCamelCase__ = src_lang if src_lang is not None else "eng_Latn"
lowerCamelCase__ = self.convert_tokens_to_ids(self._src_lang )
lowerCamelCase__ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _lowerCamelCase ( self ) -> str:
return self._src_lang
@src_lang.setter
def _lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ) -> None:
lowerCamelCase__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> List[int]:
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> Any:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
lowerCamelCase__ = src_lang
lowerCamelCase__ = self(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tgt_lang_id
return inputs
def _lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = "eng_Latn" , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = "fra_Latn" , **SCREAMING_SNAKE_CASE__ , ) -> BatchEncoding:
lowerCamelCase__ = src_lang
lowerCamelCase__ = tgt_lang
        return super().prepare_seq2seq_batch(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def _lowerCamelCase ( self ) -> Any:
return self.set_src_lang_special_tokens(self.src_lang )
def _lowerCamelCase ( self ) -> Tuple:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ) -> None:
lowerCamelCase__ = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
if self.legacy_behaviour:
lowerCamelCase__ = []
lowerCamelCase__ = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase__ = [self.cur_lang_code]
lowerCamelCase__ = [self.eos_token_id]
lowerCamelCase__ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase__ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase__ = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ) -> None:
lowerCamelCase__ = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
if self.legacy_behaviour:
lowerCamelCase__ = []
lowerCamelCase__ = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase__ = [self.cur_lang_code]
lowerCamelCase__ = [self.eos_token_id]
lowerCamelCase__ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase__ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase__ = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory.' )
return
lowerCamelCase__ = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
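# Sketch of the special-token layout the two setters above produce (values are
# illustrative). In the default, non-legacy mode the source language code is
# the prefix and "</s>" the suffix, e.g. ["eng_Latn", "▁Hello", "</s>"]; with
# legacy_behaviour=True the language code instead follows "</s>" at the end.
prefix, suffix = ["eng_Latn"], ["</s>"]
assert prefix + ["▁Hello"] + suffix == ["eng_Latn", "▁Hello", "</s>"]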
| 274 |
"""simple docstring"""
def UpperCAmelCase__ ( ) -> int:
"""simple docstring"""
return 1
def UpperCAmelCase__ ( A__ ) -> int:
"""simple docstring"""
return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def UpperCAmelCase__ ( A__ ) -> int:
"""simple docstring"""
return 0 if x < 0 else five_pence(x - 5 ) + two_pence(A__ )
def UpperCAmelCase__ ( A__ ) -> int:
"""simple docstring"""
return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(A__ )
def UpperCAmelCase__ ( A__ ) -> int:
"""simple docstring"""
return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(A__ )
def UpperCAmelCase__ ( A__ ) -> int:
"""simple docstring"""
return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(A__ )
def UpperCAmelCase__ ( A__ ) -> int:
"""simple docstring"""
return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(A__ )
def UpperCAmelCase__ ( A__ ) -> int:
"""simple docstring"""
return 0 if x < 0 else two_pound(x - 200 ) + one_pound(A__ )
def UpperCAmelCase__ ( A__ = 200 ) -> int:
"""simple docstring"""
return two_pound(A__ )
if __name__ == "__main__":
print(solution(int(input().strip())))
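# The recursive chain above counts the 73682 ways to make 200 pence from UK
# coins (Project Euler 31): each function fixes how many of its own coin to use
# and delegates the remainder to the next smaller denomination. An equivalent
# (and faster) dynamic-programming cross-check:
def coin_ways(total: int = 200) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    ways = [1] + [0] * total  # ways[a] = number of ways to make amount a
    for c in coins:
        for amount in range(c, total + 1):
            ways[amount] += ways[amount - c]
    return ways[total]

assert coin_ways(200) == 73682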
| 274 | 1 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__(self : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict=14 , UpperCAmelCase_ : Tuple=7 , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : Dict=99 , UpperCAmelCase_ : List[str]=32 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : Any=4 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : Any=37 , UpperCAmelCase_ : List[str]="gelu" , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : Dict=512 , UpperCAmelCase_ : Any=0.02 , ) ->Any:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =parent
lowerCamelCase__: Dict =batch_size
lowerCamelCase__: Tuple =seq_length
lowerCamelCase__: Union[str, Any] =is_training
lowerCamelCase__: Tuple =use_input_mask
lowerCamelCase__: List[Any] =use_token_type_ids
lowerCamelCase__: Union[str, Any] =use_labels
lowerCamelCase__: Dict =vocab_size
lowerCamelCase__: Optional[Any] =hidden_size
lowerCamelCase__: str =rotary_dim
lowerCamelCase__: Dict =num_hidden_layers
lowerCamelCase__: List[str] =num_attention_heads
lowerCamelCase__: List[str] =intermediate_size
lowerCamelCase__: Optional[Any] =hidden_act
lowerCamelCase__: int =hidden_dropout_prob
lowerCamelCase__: Optional[Any] =attention_probs_dropout_prob
lowerCamelCase__: Any =max_position_embeddings
lowerCamelCase__: Optional[int] =initializer_range
lowerCamelCase__: str =None
lowerCamelCase__: str =vocab_size - 1
lowerCamelCase__: List[Any] =vocab_size - 1
lowerCamelCase__: int =vocab_size - 1
def SCREAMING_SNAKE_CASE_ (self : Dict) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowerCamelCase__: Optional[int] =None
if self.use_input_mask:
lowerCamelCase__: Any =random_attention_mask([self.batch_size, self.seq_length])
lowerCamelCase__: List[Any] =GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=UpperCAmelCase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->str:
'''simple docstring'''
lowerCamelCase__: List[str] =self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Union[str, Any] =config_and_inputs
lowerCamelCase__: Optional[int] ={"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
def SCREAMING_SNAKE_CASE_ (self : Dict , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Any =20
lowerCamelCase__: str =model_class_name(UpperCAmelCase_)
lowerCamelCase__: Any =model.init_cache(input_ids.shape[0] , UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="i4")
lowerCamelCase__: Dict =jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1)[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1))
lowerCamelCase__: Optional[Any] =model(
input_ids[:, :-1] , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , )
lowerCamelCase__: Any =jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4")
lowerCamelCase__: List[Any] =model(
input_ids[:, -1:] , attention_mask=UpperCAmelCase_ , past_key_values=outputs_cache.past_key_values , position_ids=UpperCAmelCase_ , )
lowerCamelCase__: List[str] =model(UpperCAmelCase_)
lowerCamelCase__: Dict =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""")
def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: Any =20
lowerCamelCase__: int =model_class_name(UpperCAmelCase_)
lowerCamelCase__: int =jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))] , axis=-1 , )
lowerCamelCase__: int =model.init_cache(input_ids.shape[0] , UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1)[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1))
lowerCamelCase__: List[str] =model(
input_ids[:, :-1] , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , )
lowerCamelCase__: Any =jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4")
lowerCamelCase__: int =model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , )
lowerCamelCase__: List[str] =model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_)
lowerCamelCase__: Tuple =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""")
@require_flax
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
lowercase_ = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Any:
'''simple docstring'''
lowerCamelCase__: Tuple =FlaxGPTJModelTester(self)
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->str:
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Optional[int]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: int =self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
@tooslow
def SCREAMING_SNAKE_CASE_ (self : Any) ->Optional[Any]:
'''simple docstring'''
        lowerCamelCase__: Union[str, Any] =GPT2Tokenizer.from_pretrained("gpt2" , pad_token="<|endoftext|>" , padding_side="left")
lowerCamelCase__: Any =tokenizer(["Hello this is a long string", "Hey"] , return_tensors="np" , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_)
lowerCamelCase__: str =FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
lowerCamelCase__: Union[str, Any] =False
lowerCamelCase__: List[str] =model.config.eos_token_id
lowerCamelCase__: Optional[Any] =jax.jit(model.generate)
lowerCamelCase__: List[str] =jit_generate(
inputs["input_ids"] , attention_mask=inputs["attention_mask"] , pad_token_id=tokenizer.pad_token_id).sequences
lowerCamelCase__: Tuple =tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_)
lowerCamelCase__: Dict =[
"Hello this is a long string of text.\n\nI'm trying to get the text of the",
"Hey, I'm a little late to the party. I'm going to",
]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
@is_pt_flax_cross_test
def SCREAMING_SNAKE_CASE_ (self : int) ->List[Any]:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
# prepare inputs
lowerCamelCase__: List[Any] =self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: List[Any] ={k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
lowerCamelCase__: Optional[Any] =model_class.__name__[4:] # Skip the "Flax" at the beginning
lowerCamelCase__: List[Any] =getattr(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__ , lowerCamelCase__: List[Any] =pt_inputs["input_ids"].shape
lowerCamelCase__: Optional[Any] =np.random.randint(0 , seq_length - 1 , size=(batch_size,))
for batch_idx, start_index in enumerate(UpperCAmelCase_):
lowerCamelCase__: str =0
lowerCamelCase__: Optional[Any] =1
lowerCamelCase__: int =0
lowerCamelCase__: Union[str, Any] =1
lowerCamelCase__: Dict =pt_model_class(UpperCAmelCase_).eval()
lowerCamelCase__: Any =model_class(UpperCAmelCase_ , dtype=jnp.floataa)
lowerCamelCase__: str =convert_pytorch_state_dict_to_flax(pt_model.state_dict() , UpperCAmelCase_)
lowerCamelCase__: List[str] =fx_state
with torch.no_grad():
lowerCamelCase__: Optional[int] =pt_model(**UpperCAmelCase_).to_tuple()
lowerCamelCase__: Dict =fx_model(**UpperCAmelCase_).to_tuple()
self.assertEqual(len(UpperCAmelCase_) , len(UpperCAmelCase_) , "Output lengths differ between Flax and PyTorch")
for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2)
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(UpperCAmelCase_)
lowerCamelCase__: Optional[int] =model_class.from_pretrained(UpperCAmelCase_ , from_pt=UpperCAmelCase_)
lowerCamelCase__: str =fx_model_loaded(**UpperCAmelCase_).to_tuple()
self.assertEqual(
len(UpperCAmelCase_) , len(UpperCAmelCase_) , "Output lengths differ between Flax and PyTorch")
for fx_output_loaded, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2)
@is_pt_flax_cross_test
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
# prepare inputs
lowerCamelCase__: Union[str, Any] =self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Dict ={k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
lowerCamelCase__: Optional[int] =model_class.__name__[4:] # Skip the "Flax" at the beginning
lowerCamelCase__: List[Any] =getattr(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =pt_model_class(UpperCAmelCase_).eval()
lowerCamelCase__: str =model_class(UpperCAmelCase_ , dtype=jnp.floataa)
lowerCamelCase__: Union[str, Any] =load_flax_weights_in_pytorch_model(UpperCAmelCase_ , fx_model.params)
lowerCamelCase__ , lowerCamelCase__ =pt_inputs["input_ids"].shape
lowerCamelCase__: List[Any] =np.random.randint(0 , seq_length - 1 , size=(batch_size,))
for batch_idx, start_index in enumerate(UpperCAmelCase_):
lowerCamelCase__: str =0
lowerCamelCase__: Union[str, Any] =1
lowerCamelCase__: Dict =0
lowerCamelCase__: int =1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
lowerCamelCase__: List[Any] =pt_model(**UpperCAmelCase_).to_tuple()
lowerCamelCase__: Optional[int] =fx_model(**UpperCAmelCase_).to_tuple()
self.assertEqual(len(UpperCAmelCase_) , len(UpperCAmelCase_) , "Output lengths differ between Flax and PyTorch")
for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2)
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(UpperCAmelCase_)
lowerCamelCase__: str =pt_model_class.from_pretrained(UpperCAmelCase_ , from_flax=UpperCAmelCase_)
with torch.no_grad():
lowerCamelCase__: Optional[int] =pt_model_loaded(**UpperCAmelCase_).to_tuple()
self.assertEqual(
len(UpperCAmelCase_) , len(UpperCAmelCase_) , "Output lengths differ between Flax and PyTorch")
for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2)
@tooslow
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Optional[int]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowerCamelCase__: List[Any] =model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
lowerCamelCase__: List[str] =model(np.ones((1, 1)))
self.assertIsNotNone(UpperCAmelCase_)
| 59 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase_ = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , )
return model
@property
def UpperCamelCase( self ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase_ = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , cross_attention_dim=10 , )
return model
@property
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase_ = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , )
lowerCamelCase_ = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , )
return vqvae, unet
@slow
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
lowerCamelCase_ = DDPMScheduler()
lowerCamelCase_ = AudioDiffusionPipeline(vqvae=SCREAMING_SNAKE_CASE_ , unet=self.dummy_unet , mel=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(42 )
lowerCamelCase_ = pipe(generator=SCREAMING_SNAKE_CASE_ , steps=4 )
lowerCamelCase_ = output.audios[0]
lowerCamelCase_ = output.images[0]
lowerCamelCase_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(42 )
lowerCamelCase_ = pipe(generator=SCREAMING_SNAKE_CASE_ , steps=4 , return_dict=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
lowerCamelCase_ = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
lowerCamelCase_ = np.frombuffer(image_from_tuple.tobytes() , dtype='uint8' )[:10]
lowerCamelCase_ = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
lowerCamelCase_ = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
lowerCamelCase_ = DDIMScheduler()
lowerCamelCase_ = self.dummy_vqvae_and_unet
lowerCamelCase_ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
np.random.seed(0 )
lowerCamelCase_ = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
lowerCamelCase_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(42 )
lowerCamelCase_ = pipe(raw_audio=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , start_step=5 , steps=10 )
lowerCamelCase_ = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
lowerCamelCase_ = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
lowerCamelCase_ = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
lowerCamelCase_ = self.dummy_unet_condition
lowerCamelCase_ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=SCREAMING_SNAKE_CASE_ , mel=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
np.random.seed(0 )
lowerCamelCase_ = torch.rand((1, 1, 10) )
lowerCamelCase_ = pipe(generator=SCREAMING_SNAKE_CASE_ , encoding=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = output.images[0]
lowerCamelCase_ = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
lowerCamelCase_ = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ = torch_device
lowerCamelCase_ = DiffusionPipeline.from_pretrained('teticio/audio-diffusion-ddim-256' )
lowerCamelCase_ = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(42 )
lowerCamelCase_ = pipe(generator=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = output.audios[0]
lowerCamelCase_ = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
lowerCamelCase_ = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
lowerCamelCase_ = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
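# Hedged note on the shape contract exercised above: Mel turns audio of length
# (x_res - 1) * hop_length into a (y_res, x_res) spectrogram image, and the
# generated audio satisfies
# audio.shape == (1, (unet.config.sample_size[1] - 1) * mel.hop_length).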
| 42 | 0 |
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
__lowerCAmelCase : List[Any] = {
'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json',
'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json',
}
class lowerCamelCase ( __snake_case ):
__lowerCamelCase = 'ernie_m'
__lowerCamelCase = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self , __lowerCamelCase = 25_00_02 , __lowerCamelCase = 7_68 , __lowerCamelCase = 12 , __lowerCamelCase = 12 , __lowerCamelCase = 30_72 , __lowerCamelCase = "gelu" , __lowerCamelCase = 0.1 , __lowerCamelCase = 0.1 , __lowerCamelCase = 5_14 , __lowerCamelCase = 0.02 , __lowerCamelCase = 1 , __lowerCamelCase = 1e-05 , __lowerCamelCase=None , __lowerCamelCase=False , __lowerCamelCase=0.0 , **__lowerCamelCase , ) -> Optional[int]:
'''simple docstring'''
super().__init__(pad_token_id=__lowerCamelCase , **__lowerCamelCase )
snake_case: Tuple = vocab_size
snake_case: List[Any] = hidden_size
snake_case: Union[str, Any] = num_hidden_layers
snake_case: List[str] = num_attention_heads
snake_case: List[str] = intermediate_size
snake_case: Any = hidden_act
snake_case: Tuple = hidden_dropout_prob
snake_case: Optional[Any] = attention_probs_dropout_prob
snake_case: Optional[int] = max_position_embeddings
snake_case: Any = initializer_range
snake_case: Tuple = layer_norm_eps
snake_case: Optional[Any] = classifier_dropout
snake_case: Union[str, Any] = is_decoder
snake_case: Optional[Any] = act_dropout
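# Hedged usage note (upstream this class is ErnieMConfig): the attribute_map
# above aliases legacy field names, so reading config.num_classes returns
# config.num_labels and config.dropout returns config.classifier_dropout.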
| 164 | # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class lowerCamelCase ( __snake_case ):
__lowerCamelCase = 'facebook/bart-large-mnli'
__lowerCamelCase = (
'This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '
'should be the text to classify, and `labels`, which should be the list of labels to use for classification. '
'It returns the most likely label in the list of provided `labels` for the input text.'
)
__lowerCamelCase = 'text_classifier'
__lowerCamelCase = AutoTokenizer
__lowerCamelCase = AutoModelForSequenceClassification
__lowerCamelCase = ['text', ['text']]
__lowerCamelCase = ['text']
def lowerCAmelCase_ ( self ) -> int:
'''simple docstring'''
super().setup()
snake_case: Dict = self.model.config
snake_case: Optional[int] = -1
for idx, label in config.idalabel.items():
if label.lower().startswith("""entail""" ):
snake_case: Any = int(__lowerCamelCase )
if self.entailment_id == -1:
raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""" )
def lowerCAmelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> str:
'''simple docstring'''
snake_case: Union[str, Any] = labels
return self.pre_processor(
[text] * len(__lowerCamelCase ) , [F"This example is {label}" for label in labels] , return_tensors="""pt""" , padding="""max_length""" , )
def lowerCAmelCase_ ( self , __lowerCamelCase ) -> str:
'''simple docstring'''
snake_case: List[str] = outputs.logits
snake_case: int = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
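# Hedged usage sketch (upstream this tool is transformers' TextClassificationTool;
# the call pattern follows the PipelineTool API imported from .base above):
# classifier = TextClassificationTool()
# classifier("This movie was a masterpiece.", labels=["positive", "negative"])
# -> "positive"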
| 164 | 1 |
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class __lowerCAmelCase ( nn.Module ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=0.0 , __UpperCAmelCase = None , __UpperCAmelCase = "geglu" , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = True , __UpperCAmelCase = "layer_norm" , __UpperCAmelCase = False , ):
'''simple docstring'''
super().__init__()
__lowerCamelCase = only_cross_attention
__lowerCamelCase = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm_zero'''
__lowerCamelCase = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm'''
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
F"""`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"""
F""" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.""" )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
__lowerCamelCase = AdaLayerNorm(__UpperCAmelCase , __UpperCAmelCase )
elif self.use_ada_layer_norm_zero:
__lowerCamelCase = AdaLayerNormZero(__UpperCAmelCase , __UpperCAmelCase )
else:
__lowerCamelCase = nn.LayerNorm(__UpperCAmelCase , elementwise_affine=__UpperCAmelCase )
__lowerCamelCase = Attention(
query_dim=__UpperCAmelCase , heads=__UpperCAmelCase , dim_head=__UpperCAmelCase , dropout=__UpperCAmelCase , bias=__UpperCAmelCase , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=__UpperCAmelCase , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
__lowerCamelCase = (
AdaLayerNorm(__UpperCAmelCase , __UpperCAmelCase )
if self.use_ada_layer_norm
else nn.LayerNorm(__UpperCAmelCase , elementwise_affine=__UpperCAmelCase )
)
__lowerCamelCase = Attention(
query_dim=__UpperCAmelCase , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=__UpperCAmelCase , dim_head=__UpperCAmelCase , dropout=__UpperCAmelCase , bias=__UpperCAmelCase , upcast_attention=__UpperCAmelCase , ) # is self-attn if encoder_hidden_states is none
else:
__lowerCamelCase = None
__lowerCamelCase = None
# 3. Feed-forward
__lowerCamelCase = nn.LayerNorm(__UpperCAmelCase , elementwise_affine=__UpperCAmelCase )
__lowerCamelCase = FeedForward(__UpperCAmelCase , dropout=__UpperCAmelCase , activation_fn=__UpperCAmelCase , final_dropout=__UpperCAmelCase )
# let chunk size default to None
__lowerCamelCase = None
__lowerCamelCase = 0
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
# Sets chunk feed-forward
__lowerCamelCase = chunk_size
__lowerCamelCase = dim
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , ):
'''simple docstring'''
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
__lowerCamelCase = self.norma(__UpperCAmelCase , __UpperCAmelCase )
elif self.use_ada_layer_norm_zero:
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = self.norma(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , hidden_dtype=hidden_states.dtype )
else:
__lowerCamelCase = self.norma(__UpperCAmelCase )
__lowerCamelCase = cross_attention_kwargs if cross_attention_kwargs is not None else {}
__lowerCamelCase = self.attna(
__UpperCAmelCase , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=__UpperCAmelCase , **__UpperCAmelCase , )
if self.use_ada_layer_norm_zero:
__lowerCamelCase = gate_msa.unsqueeze(1 ) * attn_output
__lowerCamelCase = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
__lowerCamelCase = (
self.norma(__UpperCAmelCase , __UpperCAmelCase ) if self.use_ada_layer_norm else self.norma(__UpperCAmelCase )
)
__lowerCamelCase = self.attna(
__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , attention_mask=__UpperCAmelCase , **__UpperCAmelCase , )
__lowerCamelCase = attn_output + hidden_states
# 3. Feed-forward
__lowerCamelCase = self.norma(__UpperCAmelCase )
if self.use_ada_layer_norm_zero:
__lowerCamelCase = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
F"""`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.""" )
__lowerCamelCase = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
__lowerCamelCase = torch.cat(
[self.ff(__UpperCAmelCase ) for hid_slice in norm_hidden_states.chunk(__UpperCAmelCase , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
__lowerCamelCase = self.ff(__UpperCAmelCase )
if self.use_ada_layer_norm_zero:
__lowerCamelCase = gate_mlp.unsqueeze(1 ) * ff_output
__lowerCamelCase = ff_output + hidden_states
return hidden_states
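# Hedged usage sketch for the block above (upstream: diffusers' BasicTransformerBlock;
# shapes are illustrative, not prescriptive):
# block = BasicTransformerBlock(dim=320, num_attention_heads=8, attention_head_dim=40)
# block.set_chunk_feed_forward(chunk_size=16, dim=1)  # optional: chunk the FF over seq_len
# out = block(torch.randn(2, 64, 320))                # -> (2, 64, 320)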
class __lowerCAmelCase ( nn.Module ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = 4 , __UpperCAmelCase = 0.0 , __UpperCAmelCase = "geglu" , __UpperCAmelCase = False , ):
'''simple docstring'''
super().__init__()
__lowerCamelCase = int(dim * mult )
__lowerCamelCase = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
__lowerCamelCase = GELU(__UpperCAmelCase , __UpperCAmelCase )
if activation_fn == "gelu-approximate":
__lowerCamelCase = GELU(__UpperCAmelCase , __UpperCAmelCase , approximate='''tanh''' )
elif activation_fn == "geglu":
__lowerCamelCase = GEGLU(__UpperCAmelCase , __UpperCAmelCase )
elif activation_fn == "geglu-approximate":
__lowerCamelCase = ApproximateGELU(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = nn.ModuleList([] )
# project in
self.net.append(__UpperCAmelCase )
# project dropout
self.net.append(nn.Dropout(__UpperCAmelCase ) )
# project out
self.net.append(nn.Linear(__UpperCAmelCase , __UpperCAmelCase ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(__UpperCAmelCase ) )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
for module in self.net:
__lowerCamelCase = module(__UpperCAmelCase )
return hidden_states
class __lowerCAmelCase ( nn.Module ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = "none" ):
'''simple docstring'''
super().__init__()
__lowerCamelCase = nn.Linear(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = approximate
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(__UpperCAmelCase , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.proj(__UpperCAmelCase )
__lowerCamelCase = self.gelu(__UpperCAmelCase )
return hidden_states
class __lowerCAmelCase ( nn.Module ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
super().__init__()
__lowerCamelCase = nn.Linear(__UpperCAmelCase , dim_out * 2 )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(__UpperCAmelCase )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
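# GEGLU: split the 2 * dim_out projection into (value, gate) halves and return value * GELU(gate)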
__lowerCamelCase ,__lowerCamelCase = self.proj(__UpperCAmelCase ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(__UpperCAmelCase )
class __lowerCAmelCase ( nn.Module ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
super().__init__()
__lowerCamelCase = nn.Linear(__UpperCAmelCase , __UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.proj(__UpperCAmelCase )
return x * torch.sigmoid(1.702 * x )
class __lowerCAmelCase ( nn.Module ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
super().__init__()
__lowerCamelCase = nn.Embedding(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = nn.SiLU()
__lowerCamelCase = nn.Linear(__UpperCAmelCase , embedding_dim * 2 )
__lowerCamelCase = nn.LayerNorm(__UpperCAmelCase , elementwise_affine=__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
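# adaptive layer norm: predict (scale, shift) from the learned embedding, then x = norm(x) * (1 + scale) + shift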
__lowerCamelCase = self.linear(self.silu(self.emb(__UpperCAmelCase ) ) )
__lowerCamelCase ,__lowerCamelCase = torch.chunk(__UpperCAmelCase , 2 )
__lowerCamelCase = self.norm(__UpperCAmelCase ) * (1 + scale) + shift
return x
class __lowerCAmelCase ( nn.Module ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
super().__init__()
__lowerCamelCase = CombinedTimestepLabelEmbeddings(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = nn.SiLU()
__lowerCamelCase = nn.Linear(__UpperCAmelCase , 6 * embedding_dim , bias=__UpperCAmelCase )
__lowerCamelCase = nn.LayerNorm(__UpperCAmelCase , elementwise_affine=__UpperCAmelCase , eps=1E-6 )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None ):
'''simple docstring'''
__lowerCamelCase = self.linear(self.silu(self.emb(__UpperCAmelCase , __UpperCAmelCase , hidden_dtype=__UpperCAmelCase ) ) )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = emb.chunk(6 , dim=1 )
__lowerCamelCase = self.norm(__UpperCAmelCase ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class __lowerCAmelCase ( nn.Module ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = 1E-5 ):
'''simple docstring'''
super().__init__()
__lowerCamelCase = num_groups
__lowerCamelCase = eps
if act_fn is None:
__lowerCamelCase = None
else:
__lowerCamelCase = get_activation(__UpperCAmelCase )
__lowerCamelCase = nn.Linear(__UpperCAmelCase , out_dim * 2 )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if self.act:
__lowerCamelCase = self.act(__UpperCAmelCase )
__lowerCamelCase = self.linear(__UpperCAmelCase )
__lowerCamelCase = emb[:, :, None, None]
__lowerCamelCase ,__lowerCamelCase = emb.chunk(2 , dim=1 )
__lowerCamelCase = F.group_norm(__UpperCAmelCase , self.num_groups , eps=self.eps )
__lowerCamelCase = x * (1 + scale) + shift
return x
| 175 |
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
a_ = {
"""iou_prediction_head.layers.0""": """iou_prediction_head.proj_in""",
"""iou_prediction_head.layers.1""": """iou_prediction_head.layers.0""",
"""iou_prediction_head.layers.2""": """iou_prediction_head.proj_out""",
"""mask_decoder.output_upscaling.0""": """mask_decoder.upscale_conv1""",
"""mask_decoder.output_upscaling.1""": """mask_decoder.upscale_layer_norm""",
"""mask_decoder.output_upscaling.3""": """mask_decoder.upscale_conv2""",
"""mask_downscaling.0""": """mask_embed.conv1""",
"""mask_downscaling.1""": """mask_embed.layer_norm1""",
"""mask_downscaling.3""": """mask_embed.conv2""",
"""mask_downscaling.4""": """mask_embed.layer_norm2""",
"""mask_downscaling.6""": """mask_embed.conv3""",
"""point_embeddings""": """point_embed""",
"""pe_layer.positional_encoding_gaussian_matrix""": """shared_embedding.positional_embedding""",
"""image_encoder""": """vision_encoder""",
"""neck.0""": """neck.conv1""",
"""neck.1""": """neck.layer_norm1""",
"""neck.2""": """neck.conv2""",
"""neck.3""": """neck.layer_norm2""",
"""patch_embed.proj""": """patch_embed.projection""",
""".norm""": """.layer_norm""",
"""blocks""": """layers""",
}
def a__ ( _UpperCamelCase : Optional[Any] ):
__lowerCamelCase = {}
state_dict.pop('''pixel_mean''' ,_UpperCamelCase )
state_dict.pop('''pixel_std''' ,_UpperCamelCase )
__lowerCamelCase = R'''.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*'''
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
__lowerCamelCase = key.replace(_UpperCamelCase ,_UpperCamelCase )
if re.match(_UpperCamelCase ,_UpperCamelCase ):
__lowerCamelCase = int(re.match(_UpperCamelCase ,_UpperCamelCase ).group(2 ) )
if layer_nb == 0:
__lowerCamelCase = key.replace('''layers.0''' ,'''proj_in''' )
elif layer_nb == 1:
__lowerCamelCase = key.replace('''layers.1''' ,'''layers.0''' )
elif layer_nb == 2:
__lowerCamelCase = key.replace('''layers.2''' ,'''proj_out''' )
__lowerCamelCase = value
__lowerCamelCase = model_state_dict[
'''prompt_encoder.shared_embedding.positional_embedding'''
]
return model_state_dict
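# Hedged example of the renaming above: a checkpoint key such as
# "mask_decoder.output_hypernetworks_mlps.0.layers.0.weight" matches the regex,
# so layer_nb == 0 rewrites "layers.0" -> "proj_in", giving
# "mask_decoder.output_hypernetworks_mlps.0.proj_in.weight".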
def a__ ( _UpperCamelCase : Tuple ,_UpperCamelCase : Dict ,_UpperCamelCase : Any ,_UpperCamelCase : Optional[int]="ybelkada/segment-anything" ):
__lowerCamelCase = hf_hub_download(_UpperCamelCase ,F"""checkpoints/{model_name}.pth""" )
if "sam_vit_b" in model_name:
__lowerCamelCase = SamConfig()
elif "sam_vit_l" in model_name:
__lowerCamelCase = SamVisionConfig(
hidden_size=10_24 ,num_hidden_layers=24 ,num_attention_heads=16 ,global_attn_indexes=[5, 11, 17, 23] ,)
__lowerCamelCase = SamConfig(
vision_config=_UpperCamelCase ,)
elif "sam_vit_h" in model_name:
__lowerCamelCase = SamVisionConfig(
hidden_size=12_80 ,num_hidden_layers=32 ,num_attention_heads=16 ,global_attn_indexes=[7, 15, 23, 31] ,)
__lowerCamelCase = SamConfig(
vision_config=_UpperCamelCase ,)
__lowerCamelCase = torch.load(_UpperCamelCase ,map_location='''cpu''' )
__lowerCamelCase = replace_keys(_UpperCamelCase )
__lowerCamelCase = SamImageProcessor()
__lowerCamelCase = SamProcessor(image_processor=_UpperCamelCase )
__lowerCamelCase = SamModel(_UpperCamelCase )
hf_model.load_state_dict(_UpperCamelCase )
__lowerCamelCase = hf_model.to('''cuda''' )
__lowerCamelCase = '''https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png'''
__lowerCamelCase = Image.open(requests.get(_UpperCamelCase ,stream=_UpperCamelCase ).raw ).convert('''RGB''' )
__lowerCamelCase = [[[4_00, 6_50]]]
__lowerCamelCase = [[1]]
__lowerCamelCase = processor(images=np.array(_UpperCamelCase ) ,return_tensors='''pt''' ).to('''cuda''' )
with torch.no_grad():
__lowerCamelCase = hf_model(**_UpperCamelCase )
__lowerCamelCase = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.579_890_251_159_668
__lowerCamelCase = processor(
images=np.array(_UpperCamelCase ) ,input_points=_UpperCamelCase ,input_labels=_UpperCamelCase ,return_tensors='''pt''' ).to('''cuda''' )
with torch.no_grad():
__lowerCamelCase = hf_model(**_UpperCamelCase )
__lowerCamelCase = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9_712_603_092_193_604
__lowerCamelCase = ((75, 2_75, 17_25, 8_50),)
__lowerCamelCase = processor(images=np.array(_UpperCamelCase ) ,input_boxes=_UpperCamelCase ,return_tensors='''pt''' ).to('''cuda''' )
with torch.no_grad():
__lowerCamelCase = hf_model(**_UpperCamelCase )
__lowerCamelCase = output.iou_scores.squeeze()
assert scores[-1].item() == 0.8_686_015_605_926_514
# Test with 2 points and 1 image.
__lowerCamelCase = [[[4_00, 6_50], [8_00, 6_50]]]
__lowerCamelCase = [[1, 1]]
__lowerCamelCase = processor(
images=np.array(_UpperCamelCase ) ,input_points=_UpperCamelCase ,input_labels=_UpperCamelCase ,return_tensors='''pt''' ).to('''cuda''' )
with torch.no_grad():
__lowerCamelCase = hf_model(**_UpperCamelCase )
__lowerCamelCase = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9_936_047_792_434_692
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
a_ = ["""sam_vit_b_01ec64""", """sam_vit_h_4b8939""", """sam_vit_l_0b3195"""]
parser.add_argument(
"""--model_name""",
default="""sam_vit_h_4b8939""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
parser.add_argument(
"""--model_hub_id""",
default="""ybelkada/segment-anything""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
a_ = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 175 | 1 |
from __future__ import annotations
def a__ (__lowercase :Dict , __lowercase :Any ) -> list[tuple[int, int]]:
_A , _A = position
_A : Union[str, Any] = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
_A : List[str] = []
for position in positions:
_A , _A = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(UpperCamelCase__ )
return permissible_positions
def a__ (__lowercase :Dict ) -> bool:
return not any(elem == 0 for row in board for elem in row )
def a__ (__lowercase :List[Any] , __lowercase :Tuple , __lowercase :Optional[int] ) -> bool:
if is_complete(UpperCamelCase__ ):
return True
for position in get_valid_pos(UpperCamelCase__ , len(UpperCamelCase__ ) ):
_A , _A = position
if board[y][x] == 0:
_A : str = curr + 1
if open_knight_tour_helper(UpperCamelCase__ , UpperCamelCase__ , curr + 1 ):
return True
_A : Optional[int] = 0
return False
def a__ (__lowercase :Dict ) -> list[list[int]]:
_A : Union[str, Any] = [[0 for i in range(UpperCamelCase__ )] for j in range(UpperCamelCase__ )]
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
_A : Union[str, Any] = 1
if open_knight_tour_helper(UpperCamelCase__ , (i, j) , 1 ):
return board
_A : Union[str, Any] = 0
_A : str = f"""Open Kight Tour cannot be performed on a board of size {n}"""
raise ValueError(UpperCamelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
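# Hedged usage sketch (upstream name open_knight_tour; assumes the obfuscated
# assignments above restore the intended board writes):
# open_knight_tour(5) returns a 5x5 board whose entries 1..25 trace a knight's
# path visiting every square exactly once.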
| 703 |
from math import pow, sqrt
def a__ (*__lowercase :float ) -> bool:
_A : List[str] = len(__lowercase ) > 0 and all(value > 0.0 for value in values )
return result
def a__ (__lowercase :float , __lowercase :float ) -> float | ValueError:
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__lowercase , __lowercase )
else ValueError('''Input Error: Molar mass values must be greater than 0.''' )
)
def a__ (__lowercase :float , __lowercase :float , __lowercase :float ) -> float | ValueError:
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__lowercase , __lowercase , __lowercase )
else ValueError(
'''Input Error: Molar mass and effusion rate values must greater than 0.''' )
)
def a__ (__lowercase :float , __lowercase :float , __lowercase :float ) -> float | ValueError:
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__lowercase , __lowercase , __lowercase )
else ValueError(
'''Input Error: Molar mass and effusion rate values must be greater than 0.''' )
)
def a__ (__lowercase :float , __lowercase :float , __lowercase :float ) -> float | ValueError:
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(__lowercase , __lowercase , __lowercase )
else ValueError(
'''Input Error: Molar mass and effusion rate values must be greater than 0.''' )
)
def a__ (__lowercase :float , __lowercase :float , __lowercase :float ) -> float | ValueError:
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(__lowercase , __lowercase , __lowercase )
else ValueError(
'''Input Error: Molar mass and effusion rate values must be greater than 0.''' )
)
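# Hedged worked example for the first ratio formula above (upstream name
# effusion_ratio): for H2 (M = 2.016) vs O2 (M = 31.9988),
# sqrt(31.9988 / 2.016) ~= 3.984, i.e. hydrogen effuses roughly 4x faster.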
| 332 | 0 |
"""simple docstring"""
def lowerCamelCase_ (UpperCamelCase__ : List[str] ):
_UpperCAmelCase : Optional[int] = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def lowerCamelCase_ (UpperCamelCase__ : List[Any] = 5000 ):
_UpperCAmelCase : Any = [(i * (3 * i - 1)) // 2 for i in range(1 , SCREAMING_SNAKE_CASE_ )]
for i, pentagonal_i in enumerate(SCREAMING_SNAKE_CASE_ ):
for j in range(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) ):
_UpperCAmelCase : Optional[int] = pentagonal_nums[j]
_UpperCAmelCase : str = pentagonal_i + pentagonal_j
_UpperCAmelCase : int = pentagonal_j - pentagonal_i
if is_pentagonal(SCREAMING_SNAKE_CASE_ ) and is_pentagonal(SCREAMING_SNAKE_CASE_ ):
return b
return -1
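# Sanity check for the pentagonal test above: P(4) = 4 * (3 * 4 - 1) // 2 = 22,
# and (1 + (1 + 24 * 22) ** 0.5) / 6 = (1 + 23) / 6 = 4.0, an integer, so 22 is
# correctly reported as pentagonal.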
if __name__ == "__main__":
print(f"{solution() = }")
| 506 |
'''simple docstring'''
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class _a (unittest.TestCase):
"""simple docstring"""
def __init__( self , A__ ) -> List[str]:
_SCREAMING_SNAKE_CASE = parent
def UpperCamelCase ( self ) -> Any:
return {}
def lowerCAmelCase_ ( ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR=\"FFFFFF\">
<HR>
<a href=\"http://google.com\">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style=\"color:#0000FF\">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>"""
_SCREAMING_SNAKE_CASE = """
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
"""
return [html_string_a, html_string_a]
@require_bsa
class _a (_lowerCamelCase , unittest.TestCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = MarkupLMFeatureExtractor if is_bsa_available() else None
def UpperCamelCase ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = MarkupLMFeatureExtractionTester(self )
@property
def UpperCamelCase ( self ) -> List[str]:
return self.feature_extract_tester.prepare_feat_extract_dict()
def UpperCamelCase ( self ) -> Optional[Any]:
# Initialize feature_extractor
_SCREAMING_SNAKE_CASE = self.feature_extraction_class()
# Test not batched input
_SCREAMING_SNAKE_CASE = get_html_strings()[0]
_SCREAMING_SNAKE_CASE = feature_extractor(A__ )
# fmt: off
_SCREAMING_SNAKE_CASE = [["""sample document""", """Goog""", """This is one header""", """This is a another Header""", """Travel from""", """SFO to JFK""", """on May 2, 2015 at 2:00 pm. For details go to confirm.com""", """Traveler""", """name""", """is""", """John Doe"""]]
_SCREAMING_SNAKE_CASE = [["""/html/head/title""", """/html/body/a""", """/html/body/h1""", """/html/body/h2""", """/html/body/p""", """/html/body/p/p/b[1]""", """/html/body/p/p/b[2]/i""", """/html/body/p/p/div/h3""", """/html/body/p/p/div/h3/b""", """/html/body/p/p/div/h3""", """/html/body/p/p/div/h3/p"""]]
# fmt: on
self.assertEqual(encoding.nodes , A__ )
self.assertEqual(encoding.xpaths , A__ )
# Test batched
_SCREAMING_SNAKE_CASE = get_html_strings()
_SCREAMING_SNAKE_CASE = feature_extractor(A__ )
# fmt: off
_SCREAMING_SNAKE_CASE = expected_nodes + [["""My First Heading""", """My first paragraph."""]]
_SCREAMING_SNAKE_CASE = expected_xpaths + [["""/html/body/h1""", """/html/body/p"""]]
self.assertEqual(len(encoding.nodes ) , 2 )
self.assertEqual(len(encoding.xpaths ) , 2 )
self.assertEqual(encoding.nodes , A__ )
self.assertEqual(encoding.xpaths , A__ )
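# Hedged standalone sketch mirroring the assertions above (requires beautifulsoup4):
# feature_extractor = MarkupLMFeatureExtractor()
# encoding = feature_extractor("<html><body><h1>Title</h1></body></html>")
# encoding.nodes   -> [["Title"]]
# encoding.xpaths  -> [["/html/body/h1"]]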
| 591 | 0 |
"""simple docstring"""
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class UpperCamelCase_ :
def __init__( self : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[str]=14 , lowerCAmelCase_ : Tuple=7 , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Tuple=True , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : Optional[int]=99 , lowerCAmelCase_ : Union[str, Any]=32 , lowerCAmelCase_ : Union[str, Any]=5 , lowerCAmelCase_ : Union[str, Any]=4 , lowerCAmelCase_ : Any=37 , lowerCAmelCase_ : Any="gelu" , lowerCAmelCase_ : Union[str, Any]=0.1 , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : str=512 , lowerCAmelCase_ : List[str]=16 , lowerCAmelCase_ : List[Any]=2 , lowerCAmelCase_ : Tuple=0.0_2 , lowerCAmelCase_ : Any=3 , lowerCAmelCase_ : List[str]=4 , lowerCAmelCase_ : Dict=None , ) -> List[Any]:
UpperCAmelCase_ : str = parent
UpperCAmelCase_ : Dict = batch_size
UpperCAmelCase_ : Any = seq_length
UpperCAmelCase_ : Any = is_training
UpperCAmelCase_ : str = use_token_type_ids
UpperCAmelCase_ : str = use_input_mask
UpperCAmelCase_ : List[Any] = use_labels
UpperCAmelCase_ : Tuple = use_mc_token_ids
UpperCAmelCase_ : Dict = vocab_size
UpperCAmelCase_ : Union[str, Any] = hidden_size
UpperCAmelCase_ : int = num_hidden_layers
UpperCAmelCase_ : Union[str, Any] = num_attention_heads
UpperCAmelCase_ : Any = intermediate_size
UpperCAmelCase_ : Dict = hidden_act
UpperCAmelCase_ : int = hidden_dropout_prob
UpperCAmelCase_ : Any = attention_probs_dropout_prob
UpperCAmelCase_ : Union[str, Any] = max_position_embeddings
UpperCAmelCase_ : str = type_vocab_size
UpperCAmelCase_ : Any = type_sequence_label_size
UpperCAmelCase_ : List[str] = initializer_range
UpperCAmelCase_ : Dict = num_labels
UpperCAmelCase_ : Optional[Any] = num_choices
UpperCAmelCase_ : Optional[int] = scope
UpperCAmelCase_ : Any = self.vocab_size - 1
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : Any = None
if self.use_input_mask:
UpperCAmelCase_ : str = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : Tuple = None
if self.use_token_type_ids:
UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ : Optional[int] = None
if self.use_mc_token_ids:
UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
UpperCAmelCase_ : str = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Any = None
if self.use_labels:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ : Union[str, Any] = self.get_config()
UpperCAmelCase_ : str = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple:
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Dict , *lowerCAmelCase_ : int ) -> List[str]:
UpperCAmelCase_ : str = CTRLModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , head_mask=lowerCAmelCase_ )
model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int , *lowerCAmelCase_ : Any ) -> List[Any]:
UpperCAmelCase_ : Any = CTRLLMHeadModel(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase_ : int = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
UpperCAmelCase_ : Optional[Any] = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ : List[str] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}
return config, inputs_dict
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : int , *lowerCAmelCase_ : List[str] ) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = self.num_labels
UpperCAmelCase_ : Dict = CTRLForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : Any = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class UpperCamelCase_ (__A , __A , __A , unittest.TestCase ):
__magic_name__ = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
__magic_name__ = (CTRLLMHeadModel,) if is_torch_available() else ()
__magic_name__ = (
{
'''feature-extraction''': CTRLModel,
'''text-classification''': CTRLForSequenceClassification,
'''text-generation''': CTRLLMHeadModel,
'''zero-shot''': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
__magic_name__ = True
__magic_name__ = False
__magic_name__ = False
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Any ) -> Tuple:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
UpperCAmelCase_ : Union[str, Any] = CTRLModelTester(self )
UpperCAmelCase_ : List[str] = ConfigTester(self , config_class=lowerCAmelCase_ , n_embd=37 )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self : int ) -> Any:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*lowerCAmelCase_ )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str:
pass
@slow
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Dict = CTRLModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@unittest.skip("The model doesn't support left padding" ) # and it's not used enough to be worth fixing :)
def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
pass
@require_torch
class UpperCamelCase_ (unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : int ) -> Any:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
UpperCAmelCase_ : List[str] = CTRLLMHeadModel.from_pretrained("ctrl" )
model.to(lowerCAmelCase_ )
UpperCAmelCase_ : Any = torch.tensor(
[[11_859, 0, 1_611, 8]] , dtype=torch.long , device=lowerCAmelCase_ ) # Legal the president is
UpperCAmelCase_ : Dict = [
11_859,
0,
1_611,
8,
5,
150,
26_449,
2,
19,
348,
469,
3,
2_595,
48,
20_740,
246_533,
246_533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
UpperCAmelCase_ : Union[str, Any] = model.generate(lowerCAmelCase_ , do_sample=lowerCAmelCase_ )
self.assertListEqual(output_ids[0].tolist() , lowerCAmelCase_ )
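# Hedged sketch of the greedy decoding verified above ("Legal" is a CTRL
# control code; the full checkpoint is large, so this is illustrative only):
# tokenizer = CTRLTokenizer.from_pretrained("ctrl")
# input_ids = tokenizer("Legal the president is", return_tensors="pt").input_ids
# output_ids = model.generate(input_ids.to(torch_device), do_sample=False)
# tokenizer.decode(output_ids[0])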
| 463 |
"""simple docstring"""
lowerCamelCase_ = {
'''a''': '''AAAAA''',
'''b''': '''AAAAB''',
'''c''': '''AAABA''',
'''d''': '''AAABB''',
'''e''': '''AABAA''',
'''f''': '''AABAB''',
'''g''': '''AABBA''',
'''h''': '''AABBB''',
'''i''': '''ABAAA''',
'''j''': '''BBBAA''',
'''k''': '''ABAAB''',
'''l''': '''ABABA''',
'''m''': '''ABABB''',
'''n''': '''ABBAA''',
'''o''': '''ABBAB''',
'''p''': '''ABBBA''',
'''q''': '''ABBBB''',
'''r''': '''BAAAA''',
'''s''': '''BAAAB''',
'''t''': '''BAABA''',
'''u''': '''BAABB''',
'''v''': '''BBBAB''',
'''w''': '''BABAA''',
'''x''': '''BABAB''',
'''y''': '''BABBA''',
'''z''': '''BABBB''',
''' ''': ''' ''',
}
lowerCamelCase_ = {value: key for key, value in encode_dict.items()}
def snake_case ( A__ ):
UpperCAmelCase_ : Union[str, Any] = ""
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception("encode() accepts only letters of the alphabet and spaces" )
return encoded
def snake_case ( A__ ):
if set(A__ ) - {"A", "B", " "} != set():
raise Exception("decode() accepts only 'A', 'B' and spaces" )
UpperCAmelCase_ : Dict = ""
for word in coded.split():
while len(A__ ) != 0:
decoded += decode_dict[word[:5]]
UpperCAmelCase_ : str = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
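# Round-trip example derived from the tables above (hedged: assumes the
# obfuscated assignments bind `encoded` / `decoded` as intended):
# encode("hello world")
# -> "AABBBAABAAABABAABABAABBAB BABAAABBABBAAAAABABAAAABB"
# decode("AABBBAABAAABABAABABAABBAB BABAAABBABBAAAAABABAAAABB")
# -> "hello world"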
| 463 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
__lowerCAmelCase = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
__lowerCAmelCase = {
"""google/realm-cc-news-pretrained-embedder""": 5_1_2,
"""google/realm-cc-news-pretrained-encoder""": 5_1_2,
"""google/realm-cc-news-pretrained-scorer""": 5_1_2,
"""google/realm-cc-news-pretrained-openqa""": 5_1_2,
"""google/realm-orqa-nq-openqa""": 5_1_2,
"""google/realm-orqa-nq-reader""": 5_1_2,
"""google/realm-orqa-wq-openqa""": 5_1_2,
"""google/realm-orqa-wq-reader""": 5_1_2,
}
__lowerCAmelCase = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class UpperCAmelCase__ ( a_ ):
"""simple docstring"""
__UpperCAmelCase : Dict = VOCAB_FILES_NAMES
__UpperCAmelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Tuple = PRETRAINED_INIT_CONFIGURATION
__UpperCAmelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : str = RealmTokenizer
def __init__( self : int ,_a : int=None ,_a : Any=None ,_a : int=True ,_a : str="[UNK]" ,_a : Tuple="[SEP]" ,_a : Tuple="[PAD]" ,_a : Tuple="[CLS]" ,_a : Union[str, Any]="[MASK]" ,_a : List[Any]=True ,_a : int=None ,**_a : Optional[int] ,):
'''simple docstring'''
super().__init__(
_a ,tokenizer_file=_a ,do_lower_case=_a ,unk_token=_a ,sep_token=_a ,pad_token=_a ,cls_token=_a ,mask_token=_a ,tokenize_chinese_chars=_a ,strip_accents=_a ,**_a ,)
_a : List[str] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' ,_a ) != do_lower_case
or normalizer_state.get('strip_accents' ,_a ) != strip_accents
or normalizer_state.get('handle_chinese_chars' ,_a ) != tokenize_chinese_chars
):
_a : int = getattr(_a ,normalizer_state.pop('type' ) )
_a : Union[str, Any] = do_lower_case
_a : Dict = strip_accents
_a : Dict = tokenize_chinese_chars
_a : str = normalizer_class(**_a )
_a : Dict = do_lower_case
def __lowercase ( self : int ,_a : Union[str, Any] ,**_a : Optional[Any] ):
'''simple docstring'''
_a : Dict = PaddingStrategy.MAX_LENGTH
_a : Union[str, Any] = text
_a : List[str] = kwargs.pop('text_pair' ,_a )
_a : Optional[int] = kwargs.pop('return_tensors' ,_a )
_a : Optional[Any] = {
"""input_ids""": [],
"""attention_mask""": [],
"""token_type_ids""": [],
}
for idx, candidate_text in enumerate(_a ):
if batch_text_pair is not None:
_a : Any = batch_text_pair[idx]
else:
_a : str = None
_a : Optional[Any] = super().__call__(_a ,_a ,return_tensors=_a ,**_a )
_a : Union[str, Any] = encoded_candidates.get('input_ids' )
_a : List[Any] = encoded_candidates.get('attention_mask' )
_a : Optional[Any] = encoded_candidates.get('token_type_ids' )
if encoded_input_ids is not None:
output_data["input_ids"].append(_a )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(_a )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(_a )
_a : Union[str, Any] = {key: item for key, item in output_data.items() if len(_a ) != 0}
return BatchEncoding(_a ,tensor_type=_a )
def __lowercase ( self : Any ,_a : Dict ,_a : int=None ):
'''simple docstring'''
_a : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowercase ( self : Union[str, Any] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
_a : Tuple = [self.sep_token_id]
_a : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
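# Hedged usage sketch (not part of the original file): `batch_encode_candidates`
# expects one list of candidate strings per example and pads everything to `max_length`.
#
#   tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
#   batch = tokenizer.batch_encode_candidates(
#       [["Hello world!", "Nice to meet you!"]], max_length=10, return_tensors="pt"
#   )
#   # batch["input_ids"].shape -> (1, 2, 10): one example, two candidates, padded to length 10.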
| 229 |
"""simple docstring"""
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """
    Return the prime factors of ``n`` in ascending order by trial division.

    >>> prime_factors(360)
    [2, 2, 2, 3, 3, 5]
    >>> prime_factors(97)
    [97]
    """
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
| 299 | 0 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2

# Paths and flip direction are left for the user to fill in.
LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)
def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(
    img_list: list, anno_list: list, flip_type: int = 1
) -> tuple[list, list, list]:
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:  # horizontal flip: mirror the normalized x-center
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:  # vertical flip: mirror the normalized y-center
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
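# Hedged illustration (not in the original): YOLO boxes are normalized
# [class, x_center, y_center, width, height], so a horizontal flip maps
# [0, 0.2, 0.5, 0.1, 0.3] -> [0, 0.8, 0.5, 0.1, 0.3]; width and height are unchanged.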
def random_chars(number_char: int = 32) -> str:
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("DONE ✅")
| 717 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def clean_model_doc_toc(model_doc):
    """Clean up a model section of the doc table of content: no duplicates, sorted by title."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add non-duplicate keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())
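# Hedged mini-example (not in the original): given
#   [{"local": "model_doc/bert", "title": "BERT"}, {"local": "model_doc/bert", "title": "BERT"},
#    {"local": "model_doc/albert", "title": "ALBERT"}]
# the function returns the ALBERT entry followed by a single BERT entry.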
def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
| 517 | 0 |
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"
class KarrasDiffusionSchedulers(Enum):
    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    EulerAncestralDiscreteScheduler = 6
    DPMSolverMultistepScheduler = 7
    HeunDiscreteScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14


@dataclass
class SchedulerOutput(BaseOutput):
    """Base class for the output of a scheduler's `step` function."""

    prev_sample: torch.FloatTensor


class SchedulerMixin:
    """Base class for all schedulers: config loading/saving plus compatibility lookup."""

    config_name = SCHEDULER_CONFIG_NAME
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Optional[Union[str, os.PathLike]] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs: bool = False,
        **kwargs,
    ):
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            return_commit_hash=True,
            **kwargs,
        )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
| 113 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
    AutoencoderKL,
    EulerDiscreteScheduler,
    StableDiffusionLatentUpscalePipeline,
    StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list) -> bool:
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    test_cpu_offload = True
@property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu",
            attention_head_dim=8,
            norm_num_groups=None,
            block_out_channels=[32, 32, 64, 64],
            time_cond_proj_dim=160,
            conv_in_kernel=1,
            conv_out_kernel=1,
            cross_attention_dim=32,
            down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ),
            in_channels=8,
            mid_block_type=None,
            only_cross_attention=False,
            out_channels=5,
            resnet_time_scale_shift="scale_shift",
            time_embedding_type="fourier",
            timestep_post_act="gelu",
            up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="quick_gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)
    def test_karras_schedulers_shape(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2

        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                continue
            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

        assert check_same_shape(outputs)
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"

        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images

        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2
    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )

        image = upscaler(
            prompt=prompt,
            image=image,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-2
| 468 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)
class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
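# Usage note (hedged, not in the original): instantiating the class still works but
# emits a FutureWarning, then behaves exactly like LayoutLMv2ImageProcessor.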
| 570 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}
def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class EsmTokenizer(PreTrainedTokenizer):
    """
    Constructs an ESM tokenizer.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        # Protein sequences are pre-split on whitespace, one residue per token.
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=True)
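# Hedged usage sketch (not in the original): residues are whitespace-separated,
# so each amino-acid letter becomes one token.
#
#   tokenizer = EsmTokenizer("vocab.txt")   # path is a placeholder
#   tokenizer.tokenize("M S I L V")         # -> ["M", "S", "I", "L", "V"]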
| 570 | 1 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )

        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)

        self.assertTrue(lower_bound and upper_bound)
| 203 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
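# Hedged example (not in the original): with argv = ["utils", "src"], the pattern is
# r"^(utils|src).*?\.py$", so "src/foo/bar.py" matches while "docs/conf.py" does not.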
| 203 | 1 |
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """
    Circular convolution of two 1-D signals via the circulant-matrix method.
    """

    def __init__(self):
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)

        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        # row i of the matrix is the second signal rotated right by i positions
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
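# Hedged worked example (computed by hand from the defaults above): the circular
# convolution of [2, 1, 2, -1] with [1, 2, 3, 4] is [10, 10, 6, 14].
#
#   CircularConvolution().circular_convolution()  # -> [10, 10, 6, 14]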
| 526 |
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 526 | 1 |
'''simple docstring'''
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=56,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=7,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=2,
        num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
            block_size=self.block_size,
            num_random_blocks=self.num_random_blocks,
            use_bias=self.use_bias,
            rescale_embeddings=self.rescale_embeddings,
        )

        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # Flax BigBird does not return attention probabilities for block-sparse attention,
        # so skip that comparison.
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
| 42 |
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""
def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)

    # save most recent tweets
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1

        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('FirePing32')
| 321 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
    "tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
        "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CTRLForSequenceClassification",
        "CTRLLMHeadModel",
        "CTRLModel",
        "CTRLPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
        "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCTRLForSequenceClassification",
        "TFCTRLLMHeadModel",
        "TFCTRLModel",
        "TFCTRLPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
        "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ReformerAttention",
        "ReformerForMaskedLM",
        "ReformerForQuestionAnswering",
        "ReformerForSequenceClassification",
        "ReformerLayer",
        "ReformerModel",
        "ReformerModelWithLMHead",
        "ReformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 | 1 |
import jax.numpy as jnp
from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"
def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """
    Shift input ids one token to the right.
    """
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)

    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids
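# Hedged example (hypothetical ids, not in the original): with pad_token_id=0 and
# decoder_start_token_id=6, shift_tokens_right(jnp.array([[7, 8, 9]]), 0, 6)
# returns [[6, 7, 8]]; any -100 label positions would be replaced by the pad id.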
class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config
| 637 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        # Computes the expected output height and width after aspect-ratio-preserving
        # resizing to size["shortest_edge"].
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
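# Hedged worked example (not in the original): with shortest_edge=18, an image of
# width 30 and height 400 (w < h) resizes to width 18 and height int(18 * 400 / 30) = 240.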
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
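# A minimal usage sketch of the processor under test (the image path is a
# placeholder, not taken from this file):
#
#   from transformers import DeformableDetrImageProcessor
#   from PIL import Image
#
#   processor = DeformableDetrImageProcessor()
#   inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
#   inputs["pixel_values"].shape  # (1, 3, H, W), shortest edge resized to 800 by default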
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray


class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32
    def setup(self) -> None:
        self.conv_in = nn.Conv(
            self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype)
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype
            )
            blocks.append(conv2)
        self.blocks = blocks

        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )
    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)

        embedding = self.conv_out(embedding)

        return embedding
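# Shape sketch (assumes the default block_out_channels=(16, 32, 96, 256)): the
# loop above adds three stride-2 convolutions, each halving the spatial size,
# so a 512x512 conditioning image comes out as a 64x64 feature map. That is
# the same 8x factor used by init_weights below (sample_size * 8).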
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)
    def init_weights(self, rng) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
    def setup(self) -> None:
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=self.conditioning_embedding_out_channels,
        )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]
        controlnet_block = nn.Conv(output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype)
            else:
                down_block = FlaxDownBlock2D(in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype)
            down_blocks.append(down_block)

            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(in_channels=mid_block_channel, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype)

        self.controlnet_mid_block = nn.Conv(mid_block_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale: float = 1.0,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxControlNetOutput, Tuple]:
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
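# A minimal instantiation sketch (argument values are illustrative, not defaults
# from any shipped checkpoint):
#
#   controlnet = FlaxControlNetModel(
#       down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
#       block_out_channels=(32, 64),
#   )
#   params = controlnet.init_weights(jax.random.PRNGKey(0))
#
# At call time the model returns one residual per down-block output plus a
# mid-block residual, each already multiplied by `conditioning_scale`.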
| 583 |
from __future__ import annotations
def pigeon_sort(array: list[int]) -> list[int]:
    """
    Implementation of pigeonhole sort.

    >>> pigeon_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> pigeon_sort([])
    []
    >>> pigeon_sort([-2, -5, -45])
    [-45, -5, -2]
    """
    if len(array) == 0:
        return array

    _min, _max = min(array), max(array)

    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range

    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1

    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1

    # Returns the sorted array.
    return array
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
print(pigeon_sort(unsorted))
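    # Illustrative session (not part of the original script):
    #   Enter numbers separated by comma:
    #   9, 3, 1, 4
    #   [1, 3, 4, 9]
    # Pigeonhole sort runs in O(n + (max - min)) time, so it suits inputs whose
    # value range is not much larger than their length.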
| 583 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")
MAPPING_QUANTIZER = {
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
MAPPING_ENCODER = {
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
MAPPING_ENCODER_48K = {
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
MAPPING_DECODER = {
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
MAPPING_DECODER_48K = {
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f' {value.shape} for {full_name}'
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.')
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
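# Illustrative behaviour (the keys here are hypothetical, not part of the
# shipped IGNORE_KEYS list):
#   should_ignore("encoder.model.0.conv.conv", ["encoder.*"])                    -> True  (prefix wildcard)
#   should_ignore("quantizer.vq.layers.0._codebook.embed", ["vq.*._codebook"])   -> True  (infix wildcard)
#   should_ignore("decoder.model.1.lstm", ["encoder.*"])                         -> False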
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []

    if model_name in ["encodec_24khz", "encodec_32khz"]:
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f'Unsupported model: {model_name}')

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f'{name} was ignored')
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f'Unused weights: {unused_weights}')
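# Worked example of the wildcard resolution above, using the MAPPING_QUANTIZER
# entry "quantizer.vq.layers.*._codebook.embed" -> "quantizer.layers.*.codebook.embed":
#   name = "quantizer.vq.layers.3._codebook.embed"
#   key becomes the suffix "_codebook.embed"; name.split(key)[0] is
#   "quantizer.vq.layers.3.", whose second-to-last dotted field is "3", so the
#   mapped key resolves to "quantizer.layers.3.codebook.embed".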
@torch.no_grad()
def convert_checkpoint(
    model_name,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f'Unknown model name: {model_name}')

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 438 |
"""simple docstring"""
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary:
    """A mapping from symbols to consecutive integers (mirrors the fairseq Dictionary)."""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)
    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        """Returns the number of symbols in the dictionary"""
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices
    @classmethod
    def load(cls, f):
        """Loads the dictionary from a text file with '<symbol> <count>' lines."""
        d = cls()
        d.add_from_file(f)
        return d
    def add_symbol(self, word, n=1, overwrite=False):
        """Adds a word to the dictionary"""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx
    def _load_meta(self, lines):
        return 0
    def add_from_file(self, f):
        """Loads a pre-existing dictionary from a text file and adds its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d):
    # (1) remove the BPE continuation marker, (2) add a word-end marker where the word is not broken up
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f'{k}</w>']
        d2[k] = d[k]  # restore
    return d2
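# Worked example of the key rewriting (BPE continuation markers become
# word-end markers):
#   {"le@@": 5, "tt@@": 6, "er": 7, "<unk>": 3}
#   -> {"le": 5, "tt": 6, "er</w>": 7, "<unk>": 3}
# ("le@@" loses its continuation marker, "er" gains "</w>", and the special
#  tokens are restored verbatim by the loop above.)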
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f'path {biogpt_checkpoint_path} does not exist!')
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f'Writing results to {pytorch_dump_folder_path}')

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f'path to the file {checkpoint_file} does not exist!')
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f'path to the file {dict_file} does not exist!')
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f'Generating {src_vocab_file} of {src_vocab_size} records')
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f'path to the file {bpecodes_file} does not exist!')

    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
    model_conf = {
"activation_dropout": args["activation_dropout"],
"architectures": ["BioGptForCausalLM"],
"attention_probs_dropout_prob": args["attention_dropout"],
"bos_token_id": 0,
"eos_token_id": 2,
"hidden_act": args["activation_fn"],
"hidden_dropout_prob": args["dropout"],
"hidden_size": args["decoder_embed_dim"],
"initializer_range": 0.02,
"intermediate_size": args["decoder_ffn_embed_dim"],
"layer_norm_eps": 1E-1_2,
"layerdrop": args["decoder_layerdrop"],
"max_position_embeddings": args["max_target_positions"],
"model_type": "biogpt",
"num_attention_heads": args["decoder_attention_heads"],
"num_hidden_layers": args["decoder_layers"],
"pad_token_id": 1,
"scale_embedding": not args["no_scale_embedding"],
"tie_word_embeddings": args["share_decoder_input_output_embed"],
"vocab_size": src_vocab_size,
}
# good hparam defaults to start with
    print(f'Generating {biogpt_model_config_file}')
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }

    print(f'Generating {biogpt_tokenizer_config_file}')
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict["output_projection.weight"] = model_state_dict.pop(layer_name)
        else:
            model_state_dict["biogpt." + layer_name] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f'Generating {pytorch_weights_dump_path}')
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--biogpt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
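    # Illustrative invocation (the script filename is assumed; the dump dir must
    # contain checkpoint.pt, dict.txt and bpecodes):
    #   python convert_biogpt_original_pytorch_checkpoint_to_pytorch.py \
    #       --biogpt_checkpoint_path ./biogpt-checkpoint \
    #       --pytorch_dump_folder_path ./biogpt-hf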
| 438 | 1 |
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass
@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 433 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config(self):
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )

        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
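# Note on the inputs above: LiLT expects one bounding box per token in
# (x0, y0, x1, y1) order. The tester's `range_bbox` bound and the swap logic
# in `prepare_config_and_inputs` keep each random box well-formed
# (x0 <= x1 and y0 <= y1) before it reaches the model.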
| 433 | 1 |
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def rename_state_dict_key(k):
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
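# Worked example of the PATTERNS rewriting above (the TF name is illustrative):
#   "decoder/layer_0/memory_attention/output_proj/kernel"
#   -> "decoder.layers.0.encoder_attn.out_proj.weight"
# ("memory_attention" -> "encoder_attn", "/" -> ".", "r.layer_" -> "r.layers.",
#  "output_proj" -> "out_proj", "kernel" -> "weight")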
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 160 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue_model_parallelism.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1600, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1600, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed model parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }
        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999_999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
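# A standalone sketch of the same estimator configuration outside the test
# harness. The role ARN, source directory, and framework versions below are
# placeholder assumptions; only the `distribution` structure mirrors what the
# test above passes to SageMaker.
from sagemaker.huggingface import HuggingFace

estimator = HuggingFace(
    entry_point="run_glue.py",  # assumed training script
    source_dir="./examples/pytorch/text-classification",
    role="arn:aws:iam::111122223333:role/sagemaker-execution-role",  # placeholder
    instance_type="ml.p3dn.24xlarge",
    instance_count=1,
    transformers_version="4.6",
    pytorch_version="1.7",
    py_version="py36",
    distribution={
        "smdistributed": {"modelparallel": {"enabled": True, "parameters": {"partitions": 4, "ddp": True}}},
        "mpi": {"enabled": True, "processes_per_host": 8},
    },
)
# estimator.fit() would then launch the managed training job.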
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
'''word_embeddings_layernorm.weight''',
'''word_embeddings_layernorm.bias''',
'''input_layernorm.weight''',
'''input_layernorm.bias''',
'''post_attention_layernorm.weight''',
'''post_attention_layernorm.bias''',
'''self_attention.dense.bias''',
'''mlp.dense_4h_to_h.bias''',
'''ln_f.weight''',
'''ln_f.bias''',
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
'''mlp.dense_4h_to_h.weight''',
'''self_attention.dense.weight''',
]
def layer_name_mapping(key, file):
    """Convert Megatron-DeepSpeed TP/PP weight names to the transformers naming scheme."""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }
    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks: the layer index is encoded in the file name
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    # the first transformer block is stored in layer_03, so shift indices down by 3
    layer_number -= 3
    return f"h.{layer_number}." + key
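# A quick sanity check of the mapping above; the file name is a made-up example
# following the Megatron-DeepSpeed `layer_XX-model_YY-model_states.pt` scheme:
assert layer_name_mapping("bias", "layer_01-model_00-model_states.pt") == "ln_f.bias"
assert layer_name_mapping("input_layernorm.weight", "layer_04-model_00-model_states.pt") == "h.1.input_layernorm.weight"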
def get_dtype_size(dtype):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
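# For example, the per-element byte sizes this helper reports:
assert get_dtype_size(torch.float32) == 4
assert get_dtype_size(torch.float16) == 2
assert get_dtype_size(torch.bool) == 1 / 8  # the sharding bookkeeping treats bools as packed bits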
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0
        config = BloomConfig()

        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None

            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide the summed weights by the number of TP ranks to finish the averaging
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        missing_keys = None
        for file in file_names:
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide the summed weights by the number of TP ranks to finish the averaging
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))

        # The state dict is now fully loaded, so no keys should remain missing
        assert not missing_keys, f"The keys {missing_keys} are missing"

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bloom_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path to the Megatron-LM checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--bloom_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--shard_model''',
action='''store_true''',
help='''An optional setting to shard the output model \nThis enables sharding the converted checkpoint''',
)
parser.add_argument(
'''--pretraining_tp''',
default=4,
type=int,
help='''Pretraining TP rank that has been used when training the model in Megatron-LM \n''',
)
    args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Doolittle LU decomposition: factor a square matrix into lower * upper."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        # fill the current row of `lower` (strictly below the diagonal)
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        # fill the current row of `upper` (on and above the diagonal)
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
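    # A small worked example (2x2, chosen so the factors are exact): for
    # table = [[2, 5], [4, 12]], Doolittle's method gives lower = [[1, 0], [2, 1]]
    # and upper = [[2, 5], [0, 2]], and lower @ upper reproduces the input.
    table = np.array([[2.0, 5.0], [4.0, 12.0]])
    lower, upper = lower_upper_decomposition(table)
    assert np.allclose(lower @ upper, table)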
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
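# Example CLI usage (a sketch; the installed `accelerate config` entry point
# dispatches to config_command_parser/config_command above):
#
#   accelerate config                                # interactive prompts, saved to the default cache path
#   accelerate config --config_file ./my_config.yaml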
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,  # Good TPU/XLA memory alignment.
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
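# Minimal usage sketch: CANINE works directly on Unicode code points, so the
# config needs no vocabulary size. Instantiating a randomly initialized model
# from the defaults above is an offline operation:
from transformers import CanineConfig, CanineModel

config = CanineConfig()
model = CanineModel(config)
print(config.num_hash_buckets, config.downsampling_rate)  # 16384 4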
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
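# Round-trip sketch (commented out because it needs real checkpoint files at
# the default paths assumed above; any image batch in the VQGAN's expected
# value range would work as `x`):
#
#   model = load_vqgan("cpu")
#   x = torch.randn(1, 3, 256, 256)  # stand-in for a preprocessed image batch
#   xrec = reconstruct_with_vqgan(x, model)
#   print(xrec.shape)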
import unittest
from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)

    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)

    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
            "Hey there, how are you doing this fine day?",
            "This is a text with a trailing spaces followed by a dot .",
            "Häj sväjs lillebrör! =)",
            "Det är inget fel på Mr. Cool",
        ]
        # fmt: off
        expected_encoding = {"input_ids": [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="AI-Sweden/gpt-sw3-126m",
            sequences=sequences,
        )
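# Loading the released tokenizer is a one-liner (model id as referenced in the
# integration test above; shown commented since it downloads from the Hub):
#
#   tok = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
#   print(tok.tokenize("Häj sväjs lillebrör!"))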
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents
        # would always return 0 - set clip_std to 1 so it won't
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=224,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )
        return CLIPVisionModelWithProjection(config)

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor

        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=10.0,
        )

        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_prior(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)

        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
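# The dummy components above only mirror the shapes of the real prior. Running
# the published checkpoint would look like this sketch (hub id assumed from the
# kandinsky-community org; commented since it downloads several GB):
#
#   pipe = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
#   out = pipe("a photo of a horse", num_inference_steps=25)
#   image_embeds, negative_image_embeds = out.image_embeds, out.negative_image_embeds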
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node:
    r"""
    Builds the sample tree
          1
         / \
        2   3
       / \
      4   5
    """
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree
def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> list[Any]:
    output: list[Any] = []
    if root is None:
        return output

    process_queue = deque([root])

    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)

        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> list[Any]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> list[Any]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> list[Any]:
    """ZigZag traversal: alternate left-to-right and right-to-left per level."""
    if root is None:
        return []

    output: list[Sequence[Node | None]] = []

    flag = 0
    height_tree = height(root)

    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0

    return output


def main() -> None:  # Main function for testing.
    tree = make_tree()

    print(f"In-order Traversal: {inorder(tree)}")
    print(f"Pre-order Traversal: {preorder(tree)}")
    print(f"Post-order Traversal: {postorder(tree)}", "\n")

    print(f"Height of Tree: {height(tree)}", "\n")

    print("Complete Level Order Traversal: ")
    print(level_order(tree), "\n")

    print("Level-wise order Traversal: ")

    for level in range(1, height(tree) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(tree, level=level))

    print("\nZigZag order Traversal: ")
    print(zigzag(tree))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
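    # Quick self-check of the helpers above on the sample tree (1 at the root,
    # 2 and 3 below it, 4 and 5 under 2):
    tree = make_tree()
    assert inorder(tree) == [4, 2, 5, 1, 3]
    assert preorder(tree) == [1, 2, 4, 5, 3]
    assert postorder(tree) == [4, 5, 2, 3, 1]
    assert level_order(tree) == [1, 2, 3, 4, 5]
    assert zigzag(tree) == [[1], [3, 2], [4, 5]]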
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    """Compute Gamma(num) = integral of x^(num-1) * e^(-x) over [0, inf) by quadrature."""
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
if __name__ == "__main__":
from doctest import testmod
testmod()
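    # Quick check: for positive integers n, Gamma(n) == (n - 1)!, so gamma(5) ~ 24.
    assert math.isclose(gamma(5), 24.0, rel_tol=1e-6)
    assert math.isclose(gamma(1), 1.0, rel_tol=1e-6)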
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""simple docstring"""
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester(unittest.TestCase):
    """
    Test case for verifying the `accelerate launch` CLI operates correctly.
    If a `default_config.yaml` file is located in the cache it will temporarily move it
    for the duration of the tests.
    """

    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])

    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"

    test_config_path = Path("tests/test_configs")

    @classmethod
    def setUpClass(cls):
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def test_no_config(self):
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_compatibility(self):
        for config in sorted(self.test_config_path.glob("**/*.yaml")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy()
                )

    def test_accelerate_test(self):
        execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())


class TpuConfigTester(unittest.TestCase):
    """
    Test case for verifying the `accelerate tpu-config` CLI passes the right command to `gcloud`.
    """

    tpu_name = "test-tpu"
    tpu_zone = "us-central1-a"
    command = "ls"
    cmd = ["accelerate", "tpu-config"]
    base_output = "cd /usr/share"
    command_file = "tests/test_samples/test_command_file.sh"
    gcloud = "Running gcloud compute tpus tpu-vm ssh"

    def test_base(self):
        output = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_base_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"], return_stdout=True
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file_and_multiple_command(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                'echo "Hello World"',
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all',
            output,
        )

    def test_with_config_file_and_command_file(self):
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command_file_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install_version(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Convert a pytorch_model.bin or model.pt file to torch.float16 for faster downloads, less disk space."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
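# Example invocation via python-fire (file names are hypothetical):
#
#   python fp16_convert.py pytorch_model.bin --save_path pytorch_model.fp16.bin
#
# Omitting --save_path overwrites the source file with its fp16 version.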
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
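# Usage sketch with a published score-based checkpoint (hub id assumed; the
# default 2000 steps is slow, so a small step count is shown):
#
#   pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
#   image = pipe(num_inference_steps=10).images[0]
#   image.save("sde_ve_sample.png")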
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
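# A minimal sketch of the rope_scaling contract enforced above (values are
# illustrative; `factor` must be a float > 1):
config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
print(config.rope_scaling)  # {'type': 'linear', 'factor': 2.0}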
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}
class RemBertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sort in place by repeatedly exchanging out-of-order pairs; O(n^2) comparisons."""
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(exchange_sort(unsorted))
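    # Quick self-check (exchange_sort sorts in place and returns the same list):
    assert exchange_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert exchange_sort([-1, 0, 7, 7]) == [-1, 0, 7, 7]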
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
'''iou_prediction_head.layers.0''': '''iou_prediction_head.proj_in''',
'''iou_prediction_head.layers.1''': '''iou_prediction_head.layers.0''',
'''iou_prediction_head.layers.2''': '''iou_prediction_head.proj_out''',
'''mask_decoder.output_upscaling.0''': '''mask_decoder.upscale_conv1''',
'''mask_decoder.output_upscaling.1''': '''mask_decoder.upscale_layer_norm''',
'''mask_decoder.output_upscaling.3''': '''mask_decoder.upscale_conv2''',
'''mask_downscaling.0''': '''mask_embed.conv1''',
'''mask_downscaling.1''': '''mask_embed.layer_norm1''',
'''mask_downscaling.3''': '''mask_embed.conv2''',
'''mask_downscaling.4''': '''mask_embed.layer_norm2''',
'''mask_downscaling.6''': '''mask_embed.conv3''',
'''point_embeddings''': '''point_embed''',
'''pe_layer.positional_encoding_gaussian_matrix''': '''shared_embedding.positional_embedding''',
'''image_encoder''': '''vision_encoder''',
'''neck.0''': '''neck.conv1''',
'''neck.1''': '''neck.layer_norm1''',
'''neck.2''': '''neck.conv2''',
'''neck.3''': '''neck.layer_norm2''',
'''patch_embed.proj''': '''patch_embed.projection''',
'''.norm''': '''.layer_norm''',
'''blocks''': '''layers''',
}
def UpperCamelCase__ ( lowerCAmelCase__ ):
lowercase = {}
state_dict.pop("""pixel_mean""" ,lowerCAmelCase__ )
state_dict.pop("""pixel_std""" ,lowerCAmelCase__ )
lowercase = r""".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"""
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
lowercase = key.replace(lowerCAmelCase__ ,lowerCAmelCase__ )
if re.match(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowercase = int(re.match(lowerCAmelCase__ ,lowerCAmelCase__ ).group(2 ) )
if layer_nb == 0:
lowercase = key.replace("""layers.0""" ,"""proj_in""" )
elif layer_nb == 1:
lowercase = key.replace("""layers.1""" ,"""layers.0""" )
elif layer_nb == 2:
lowercase = key.replace("""layers.2""" ,"""proj_out""" )
lowercase = value
lowercase = model_state_dict[
"""prompt_encoder.shared_embedding.positional_embedding"""
]
return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1_024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23]
        )
        config = SamConfig(vision_config=vision_config)
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1_280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31]
        )
        config = SamConfig(vision_config=vision_config)

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579_890_251_159_668

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9_712_603_092_193_604

        input_boxes = ((75, 275, 1_725, 850),)
        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8_686_015_605_926_514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9_936_047_792_434_692
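# Note: as written, the function verifies the converted weights but never
# persists them. A minimal sketch of the missing step, assuming the standard
# save_pretrained / push_to_hub APIs (the hub repo id below is hypothetical):
#
#   if pytorch_dump_folder is not None:
#       processor.save_pretrained(pytorch_dump_folder)
#       hf_model.save_pretrained(pytorch_dump_folder)
#   if push_to_hub:
#       hf_model.push_to_hub(f"converted/{model_name}")  # hypothetical repo id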
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
    parser.add_argument(
        "--model_name",
        default="sam_vit_h_4b8939",
        choices=choices,
        type=str,
        help="Name of the original SAM checkpoint to convert.",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    parser.add_argument(
        "--model_hub_id",
        default="ybelkada/segment-anything",
        type=str,
        help="Hub repository that hosts the original SAM checkpoints.",
    )

    args = parser.parse_args()
    convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 703 |
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-audio-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class Data2VecAudioConfig(PretrainedConfig):
    model_type = "data2vec-audio"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
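# A minimal sketch: with the defaults above, the feature encoder downsamples
# raw audio by the product of the conv strides, i.e. 5 * 2**6 = 320 samples
# per output frame.
#
#   config = Data2VecAudioConfig()
#   assert config.inputs_to_logits_ratio == 320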
| 72 | 0 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"module.blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"module.blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"module.blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"module.blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"module.blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    # projection head is used in the self-supervised pre-training in MSN,
    # for downstream task it's not needed.
    ignore_keys = [
"""module.fc.fc1.weight""",
"""module.fc.fc1.bias""",
"""module.fc.bn1.weight""",
"""module.fc.bn1.bias""",
"""module.fc.bn1.running_mean""",
"""module.fc.bn1.running_var""",
"""module.fc.bn1.num_batches_tracked""",
"""module.fc.fc2.weight""",
"""module.fc.fc2.bias""",
"""module.fc.bn2.weight""",
"""module.fc.bn2.bias""",
"""module.fc.bn2.running_mean""",
"""module.fc.bn2.running_var""",
"""module.fc.bn2.num_batches_tracked""",
"""module.fc.fc3.weight""",
"""module.fc.fc3.bias""",
]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
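# For illustration (hypothetical entry): rename_key moves a value to a new key
# in place.
#
#   d = {"module.pos_embed": 0}
#   rename_key(d, "module.pos_embed", "vit.embeddings.position_embeddings")
#   assert d == {"vit.embeddings.position_embeddings": 0}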
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
    convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 21 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items() )
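# Sanity-check sketch (hypothetical tensor): summing every parameter value
# yields a cheap fingerprint for comparing two state dicts.
#
#   sd = {"layer.weight": torch.ones(2, 2)}
#   assert count_parameters(sd).item() == 4.0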
def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}
    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue
        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")
        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade
@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()
    convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 76 | 0 |
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
| 720 |
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
logger = logging.get_logger(__name__)


class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 638 | 0 |