code
stringlengths 82
53.2k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
---|---|---|---|---|
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
__a :List[Any] = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
__a :Union[str, Any] = [0, 25, 50]
__a :Any = [25, 50, 75]
__a :Tuple = fuzz.membership.trimf(X, abca)
__a :List[Any] = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
__a :Union[str, Any] = np.ones(75)
__a :Tuple = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
__a :Optional[Any] = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
__a :Optional[int] = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1- min(µA(x))
__a :List[str] = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
__a :List[Any] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
__a :Union[str, Any] = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
__a :Any = young * middle_aged
# 7. Bounded Sum = min[1,(µA(x), µB(x))]
__a :Any = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = min[0,(µA(x), µB(x))]
__a :Optional[int] = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show() | 86 |
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
_A = logging.get_logger(__name__)
_A = {
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""",
# See all BART models at https://huggingface.co/models?filter=bart
}
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'bart'
SCREAMING_SNAKE_CASE = ['past_key_values']
SCREAMING_SNAKE_CASE = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__(self , _lowerCamelCase=50265 , _lowerCamelCase=1024 , _lowerCamelCase=12 , _lowerCamelCase=4096 , _lowerCamelCase=16 , _lowerCamelCase=12 , _lowerCamelCase=4096 , _lowerCamelCase=16 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase="gelu" , _lowerCamelCase=1024 , _lowerCamelCase=0.1 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.02 , _lowerCamelCase=0.0 , _lowerCamelCase=False , _lowerCamelCase=True , _lowerCamelCase=3 , _lowerCamelCase=1 , _lowerCamelCase=0 , _lowerCamelCase=2 , _lowerCamelCase=True , _lowerCamelCase=2 , _lowerCamelCase=2 , **_lowerCamelCase , ):
"""simple docstring"""
UpperCAmelCase__ : str = vocab_size
UpperCAmelCase__ : Optional[Any] = max_position_embeddings
UpperCAmelCase__ : int = d_model
UpperCAmelCase__ : str = encoder_ffn_dim
UpperCAmelCase__ : List[str] = encoder_layers
UpperCAmelCase__ : Optional[int] = encoder_attention_heads
UpperCAmelCase__ : Optional[int] = decoder_ffn_dim
UpperCAmelCase__ : Dict = decoder_layers
UpperCAmelCase__ : int = decoder_attention_heads
UpperCAmelCase__ : Optional[Any] = dropout
UpperCAmelCase__ : int = attention_dropout
UpperCAmelCase__ : str = activation_dropout
UpperCAmelCase__ : Optional[int] = activation_function
UpperCAmelCase__ : List[str] = init_std
UpperCAmelCase__ : Dict = encoder_layerdrop
UpperCAmelCase__ : Any = decoder_layerdrop
UpperCAmelCase__ : List[str] = classifier_dropout
UpperCAmelCase__ : Any = use_cache
UpperCAmelCase__ : Union[str, Any] = encoder_layers
UpperCAmelCase__ : List[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=_lowerCamelCase , pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , is_encoder_decoder=_lowerCamelCase , decoder_start_token_id=_lowerCamelCase , forced_eos_token_id=_lowerCamelCase , **_lowerCamelCase , )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , _lowerCamelCase ):
UpperCAmelCase__ : List[Any] = self.bos_token_id
warnings.warn(
F"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
"""The config can simply be saved and uploaded again to be fixed.""" )
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
@property
def _a (self ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase__ : int = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
UpperCAmelCase__ : Dict = {0: """batch"""}
UpperCAmelCase__ : Tuple = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
UpperCAmelCase__ : Dict = {0: """batch""", 1: """decoder_sequence"""}
UpperCAmelCase__ : List[str] = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(_lowerCamelCase , direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
UpperCAmelCase__ : str = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.num_layers
for i in range(_lowerCamelCase ):
UpperCAmelCase__ : Optional[int] = {0: """batch""", 2: """past_sequence + sequence"""}
UpperCAmelCase__ : int = {0: """batch""", 2: """past_sequence + sequence"""}
else:
UpperCAmelCase__ : Union[str, Any] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
def _a (self ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase__ : Any = super().outputs
else:
UpperCAmelCase__ : Union[str, Any] = super(_lowerCamelCase , self ).outputs
if self.use_past:
UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.num_layers
for i in range(_lowerCamelCase ):
UpperCAmelCase__ : List[str] = {0: """batch""", 2: """past_sequence + sequence"""}
UpperCAmelCase__ : int = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def _a (self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ):
"""simple docstring"""
UpperCAmelCase__ : int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Generate decoder inputs
UpperCAmelCase__ : int = seq_length if not self.use_past else 1
UpperCAmelCase__ : str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
UpperCAmelCase__ : str = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
UpperCAmelCase__ : Union[str, Any] = dict(**_lowerCamelCase , **_lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
UpperCAmelCase__ , UpperCAmelCase__ : str = common_inputs["""input_ids"""].shape
UpperCAmelCase__ : int = common_inputs["""decoder_input_ids"""].shape[1]
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.num_attention_heads
UpperCAmelCase__ : str = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCAmelCase__ : Optional[int] = decoder_seq_length + 3
UpperCAmelCase__ : Any = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
UpperCAmelCase__ : str = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(_lowerCamelCase , _lowerCamelCase )] , dim=1 )
UpperCAmelCase__ : List[str] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.num_layers
UpperCAmelCase__ : Union[str, Any] = min(_lowerCamelCase , _lowerCamelCase )
UpperCAmelCase__ : str = max(_lowerCamelCase , _lowerCamelCase ) - min_num_layers
UpperCAmelCase__ : Dict = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(_lowerCamelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(_lowerCamelCase ),
torch.zeros(_lowerCamelCase ),
torch.zeros(_lowerCamelCase ),
torch.zeros(_lowerCamelCase ),
) )
# TODO: test this.
UpperCAmelCase__ : Dict = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(_lowerCamelCase , _lowerCamelCase ):
common_inputs["past_key_values"].append((torch.zeros(_lowerCamelCase ), torch.zeros(_lowerCamelCase )) )
return common_inputs
def _a (self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ):
"""simple docstring"""
UpperCAmelCase__ : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
UpperCAmelCase__ : int = seqlen + 2
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.num_layers
UpperCAmelCase__ , UpperCAmelCase__ : str = self.num_attention_heads
UpperCAmelCase__ : Tuple = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCAmelCase__ : Optional[int] = common_inputs["""attention_mask"""].dtype
UpperCAmelCase__ : List[str] = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(_lowerCamelCase , _lowerCamelCase , dtype=_lowerCamelCase )] , dim=1 )
UpperCAmelCase__ : Tuple = [
(torch.zeros(_lowerCamelCase ), torch.zeros(_lowerCamelCase )) for _ in range(_lowerCamelCase )
]
return common_inputs
def _a (self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = compute_effective_axis_dimension(
_lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCAmelCase__ : Optional[int] = tokenizer.num_special_tokens_to_add(_lowerCamelCase )
UpperCAmelCase__ : int = compute_effective_axis_dimension(
_lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowerCamelCase )
# Generate dummy inputs according to compute batch and sequence
UpperCAmelCase__ : List[Any] = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
UpperCAmelCase__ : Optional[int] = dict(tokenizer(_lowerCamelCase , return_tensors=_lowerCamelCase ) )
return common_inputs
def _a (self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase__ : Tuple = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase )
elif self.task == "causal-lm":
UpperCAmelCase__ : Optional[int] = self._generate_dummy_inputs_for_causal_lm(
_lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase )
else:
UpperCAmelCase__ : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase )
return common_inputs
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase__ : Union[str, Any] = super()._flatten_past_key_values_(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
UpperCAmelCase__ : Optional[int] = super(_lowerCamelCase , self )._flatten_past_key_values_(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
| 182 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : List[Any]= logging.get_logger(__name__)
_a : Dict= {
"transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class UpperCamelCase ( __lowerCAmelCase ):
UpperCAmelCase : Union[str, Any] = "transfo-xl"
UpperCAmelCase : List[Any] = ["mems"]
UpperCAmelCase : Tuple = {
"n_token": "vocab_size",
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__(self : Tuple , _A : str=26_77_35 , _A : Dict=[2_00_00, 4_00_00, 20_00_00] , _A : Dict=10_24 , _A : Any=10_24 , _A : Any=16 , _A : Optional[Any]=64 , _A : Any=40_96 , _A : str=4 , _A : Dict=False , _A : Union[str, Any]=18 , _A : str=16_00 , _A : Any=10_00 , _A : Any=True , _A : int=True , _A : Tuple=0 , _A : str=-1 , _A : List[Any]=True , _A : List[Any]=0.1 , _A : int=0.0 , _A : Union[str, Any]=True , _A : Union[str, Any]="normal" , _A : str=0.01 , _A : Optional[Any]=0.01 , _A : List[str]=0.02 , _A : Dict=1E-5 , _A : Dict=0 , **_A : List[str] , ) -> Dict:
__snake_case : Dict = vocab_size
__snake_case : Optional[Any] = []
self.cutoffs.extend(lowerCamelCase__)
if proj_share_all_but_first:
__snake_case : List[str] = [False] + [True] * len(self.cutoffs)
else:
__snake_case : List[Any] = [False] + [False] * len(self.cutoffs)
__snake_case : Dict = d_model
__snake_case : Union[str, Any] = d_embed
__snake_case : Tuple = d_head
__snake_case : int = d_inner
__snake_case : Tuple = div_val
__snake_case : List[str] = pre_lnorm
__snake_case : Union[str, Any] = n_layer
__snake_case : Any = n_head
__snake_case : Tuple = mem_len
__snake_case : Dict = same_length
__snake_case : int = attn_type
__snake_case : Dict = clamp_len
__snake_case : int = sample_softmax
__snake_case : List[str] = adaptive
__snake_case : Any = dropout
__snake_case : int = dropatt
__snake_case : Union[str, Any] = untie_r
__snake_case : Dict = init
__snake_case : Union[str, Any] = init_range
__snake_case : Union[str, Any] = proj_init_std
__snake_case : str = init_std
__snake_case : List[Any] = layer_norm_epsilon
super().__init__(eos_token_id=lowerCamelCase__ , **lowerCamelCase__)
@property
def _lowercase (self : int) -> List[str]:
logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
return -1
@max_position_embeddings.setter
def _lowercase (self : Optional[Any] , _A : Tuple) -> Union[str, Any]:
raise NotImplementedError(
f"The model {self.model_type} is one of the few models that has no sequence length limit.")
| 718 | """simple docstring"""
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_a : Optional[int]= get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class UpperCamelCase ( lowercase , unittest.TestCase ):
UpperCAmelCase : List[str] = XLMProphetNetTokenizer
UpperCAmelCase : int = False
UpperCAmelCase : List[Any] = True
def _lowercase (self : Dict) -> Dict:
super().setUp()
# We have a SentencePiece fixture for testing
__snake_case : Tuple = XLMProphetNetTokenizer(_A , keep_accents=_A)
tokenizer.save_pretrained(self.tmpdirname)
def _lowercase (self : str) -> Optional[int]:
__snake_case : List[str] = '[PAD]'
__snake_case : str = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A) , _A)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A) , _A)
def _lowercase (self : Dict) -> str:
__snake_case : int = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '[PAD]')
self.assertEqual(vocab_keys[1] , '[CLS]')
self.assertEqual(vocab_keys[-1] , 'j')
self.assertEqual(len(_A) , 10_12)
def _lowercase (self : Dict) -> List[str]:
self.assertEqual(self.get_tokenizer().vocab_size , 10_12)
def _lowercase (self : str) -> Dict:
__snake_case : int = XLMProphetNetTokenizer(_A , keep_accents=_A)
__snake_case : Tuple = tokenizer.tokenize('This is a test')
self.assertListEqual(_A , ['▁This', '▁is', '▁a', '▁t', 'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_A) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
__snake_case : int = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
__snake_case : Tuple = tokenizer.convert_tokens_to_ids(_A)
self.assertListEqual(
_A , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
__snake_case : Optional[Any] = tokenizer.convert_ids_to_tokens(_A)
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'[UNK]',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'[UNK]',
'.',
] , )
@cached_property
def _lowercase (self : List[Any]) -> Tuple:
return XLMProphetNetTokenizer.from_pretrained('microsoft/xprophetnet-large-wiki100-cased')
@slow
def _lowercase (self : Tuple) -> Any:
__snake_case : Dict = 'Hello World!'
__snake_case : List[str] = [3_53_89, 66_72, 49, 2]
self.assertListEqual(_A , self.big_tokenizer.encode(_A))
@slow
def _lowercase (self : Union[str, Any]) -> List[Any]:
# fmt: off
__snake_case : Any = {'input_ids': [[1_10_73, 8_27_83, 18, 26, 8_27_83, 5_49, 5_15_40, 2_48, 1_72_09, 13_01, 2_17, 20, 21_51_86, 13_25, 1_47, 1_72_09, 13_01, 2_17, 20, 5_63_70, 53, 12_20_20, 20, 1_64_77, 27, 8_73_55, 45_48, 20, 47_28, 7_83_92, 17, 15_99_69, 18, 26, 2_44_91, 6_29, 15, 5_38, 2_27_04, 54_39, 15, 27_88, 2_44_91, 98_85, 15, 4_35_34, 6_05, 15, 8_14, 1_84_03, 3_32_00, 29, 15, 4_35_34, 2_44_58, 1_24_10, 1_11, 2_49_66, 8_36_69, 96_37, 14_40_68, 26, 8_50, 2_23_46, 27, 1_47, 2_49_66, 8_36_69, 8_34_90, 26, 3_91_13, 7_35, 27, 6_89, 6_56, 28_00, 13_39, 46_00, 53, 12_20_20, 11_57_85, 34, 8_16, 13_39, 4_68_87, 18, 1_47, 5_39_05, 19_51, 4_22_38, 4_11_70, 1_77_32, 8_34, 4_36, 15, 2_75_23, 9_87_33, 2_17, 1_47, 55_42, 49_81, 9_30, 1_73_47, 16, 2], [2_00_91, 6_29, 94, 8_27_86, 58, 4_90, 20, 15_28, 84, 5_39_05, 3_44, 8_05_92, 11_01_28, 1_88_22, 52_67, 13_06, 62, 15_25_37, 3_08, 79_97, 4_01, 12_44_27, 5_49, 3_54_42, 2_25, 1_09, 1_50_55, 2_57_48, 1_47, 71_19, 4_37_12, 34, 7_67, 13_53_66, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_92, 6_37_84, 11_94_66, 17, 14_78_08, 8_82_14, 18, 6_56, 81, 32, 32_96, 1_02_80, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='microsoft/xprophetnet-large-wiki100-cased' , revision='1acad1643ddd54a44df6a1b797ada8373685d90e' , )
| 192 | 0 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def lowerCamelCase__ ( __lowerCamelCase : int ):
__UpperCAmelCase : Optional[Any] = prime_factors(__lowerCamelCase )
if is_square_free(__lowerCamelCase ):
return -1 if len(__lowerCamelCase ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63 |
def a__ ( snake_case__ : int , snake_case__ : int ):
return 1 if input_a == input_a else 0
def a__ ( ):
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 643 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
SCREAMING_SNAKE_CASE = 'https://www.indeed.co.in/jobs?q=mobile+app+development&l='
def lowercase_ ( __A : str = "mumbai" ) -> Generator[tuple[str, str], None, None]:
"""simple docstring"""
lowercase : List[Any] =BeautifulSoup(requests.get(url + location ).content , '''html.parser''' )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all('''div''' , attrs={'''data-tn-component''': '''organicJob'''} ):
lowercase : List[str] =job.find('''a''' , attrs={'''data-tn-element''': '''jobTitle'''} ).text.strip()
lowercase : Union[str, Any] =job.find('''span''' , {'''class''': '''company'''} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('Bangalore'), 1):
print(f"""Job {i:>2} is {job[0]} at {job[1]}""")
| 704 |
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'Alexander Joslin'
import operator as op
from .stack import Stack
def lowercase_ ( __A : str ) -> int:
"""simple docstring"""
lowercase : int ={'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub}
lowercase : Stack[int] =Stack()
lowercase : Stack[str] =Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(__A ) )
elif i in operators:
# RULE 2
operator_stack.push(__A )
elif i == ")":
# RULE 4
lowercase : Optional[Any] =operator_stack.peek()
operator_stack.pop()
lowercase : Optional[Any] =operand_stack.peek()
operand_stack.pop()
lowercase : Optional[Any] =operand_stack.peek()
operand_stack.pop()
lowercase : List[str] =operators[opr](__A , __A )
operand_stack.push(__A )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = '(5 + ((4 * 2) * (2 + 3)))'
# answer = 45
print(f"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 8 | 0 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class __magic_name__ :
SCREAMING_SNAKE_CASE = 42
# setable values
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = None
@classmethod
def __magic_name__ ( cls , __snake_case , __snake_case , __snake_case ) -> Dict:
'''simple docstring'''
return cls(common=__snake_case , init_noise_sigma=__snake_case , timesteps=__snake_case )
@dataclass
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = 42
class __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = [e.name for e in FlaxKarrasDiffusionSchedulers]
SCREAMING_SNAKE_CASE = 42
@property
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
return True
@register_to_config
def __init__( self , __snake_case = 1000 , __snake_case = 0.0001 , __snake_case = 0.02 , __snake_case = "linear" , __snake_case = None , __snake_case = "fixed_small" , __snake_case = True , __snake_case = "epsilon" , __snake_case = jnp.floataa , ) -> Union[str, Any]:
'''simple docstring'''
__a =dtype
def __magic_name__ ( self , __snake_case = None ) -> DDPMSchedulerState:
'''simple docstring'''
if common is None:
__a =CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
__a =jnp.array(1.0 , dtype=self.dtype )
__a =jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=__snake_case , init_noise_sigma=__snake_case , timesteps=__snake_case , )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case = None ) -> jnp.ndarray:
'''simple docstring'''
return sample
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case = () ) -> DDPMSchedulerState:
'''simple docstring'''
__a =self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
__a =(jnp.arange(0 , __snake_case ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=__snake_case , timesteps=__snake_case , )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case=None , __snake_case=None ) -> Dict:
'''simple docstring'''
__a =state.common.alphas_cumprod[t]
__a =jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
__a =(1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
__a =self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
__a =jnp.clip(__snake_case , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
__a =jnp.log(jnp.clip(__snake_case , a_min=1e-20 ) )
elif variance_type == "fixed_large":
__a =state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
__a =jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
__a =variance
__a =state.common.betas[t]
__a =(predicted_variance + 1) / 2
__a =frac * max_log + (1 - frac) * min_log
return variance
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case = None , __snake_case = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
'''simple docstring'''
__a =timestep
if key is None:
__a =jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
__a , __a =jnp.split(__snake_case , sample.shape[1] , axis=1 )
else:
__a =None
# 1. compute alphas, betas
__a =state.common.alphas_cumprod[t]
__a =jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
__a =1 - alpha_prod_t
__a =1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
__a =(sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
__a =model_output
elif self.config.prediction_type == "v_prediction":
__a =(alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '
' for the FlaxDDPMScheduler.' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
__a =jnp.clip(__snake_case , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__a =(alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
__a =state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__a =pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
__a =jax.random.split(__snake_case , num=1 )
__a =jax.random.normal(__snake_case , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(__snake_case , __snake_case , predicted_variance=__snake_case ) ** 0.5) * noise
__a =jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
__a =pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=__snake_case , state=__snake_case )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , ) -> jnp.ndarray:
'''simple docstring'''
return add_noise_common(state.common , __snake_case , __snake_case , __snake_case )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , ) -> jnp.ndarray:
'''simple docstring'''
return get_velocity_common(state.common , __snake_case , __snake_case , __snake_case )
def __len__( self ) -> Dict:
'''simple docstring'''
return self.config.num_train_timesteps
| 242 |
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ ):
@register_to_config
def __init__( self , *,
__snake_case = 4 , __snake_case = 768 , __snake_case , __snake_case , ) -> str:
'''simple docstring'''
super().__init__()
__a =nn.Parameter(torch.zeros(__snake_case ) )
# parameters for additional clip time embeddings
__a =nn.Linear(__snake_case , __snake_case )
__a =nn.Linear(__snake_case , __snake_case )
# parameters for encoder hidden states
__a =clip_extra_context_tokens
__a =nn.Linear(
__snake_case , self.clip_extra_context_tokens * cross_attention_dim )
__a =nn.Linear(__snake_case , __snake_case )
__a =nn.LayerNorm(__snake_case )
def __magic_name__ ( self , *, __snake_case , __snake_case , __snake_case , __snake_case ) -> List[str]:
'''simple docstring'''
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
__a =image_embeddings.shape[0]
__a =self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
__a =classifier_free_guidance_embeddings.expand(
__snake_case , -1 )
__a =torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
__a =prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
__a =self.embedding_proj(__snake_case )
__a =self.clip_image_embeddings_project_to_time_embeddings(__snake_case )
__a =time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
__a =self.clip_extra_context_tokens_proj(__snake_case )
__a =clip_extra_context_tokens.reshape(__snake_case , -1 , self.clip_extra_context_tokens )
__a =clip_extra_context_tokens.permute(0 , 2 , 1 )
__a =self.encoder_hidden_states_proj(__snake_case )
__a =self.text_encoder_hidden_states_norm(__snake_case )
__a =torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
| 242 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase = {
"""configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""],
"""tokenization_convbert""": ["""ConvBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["""ConvBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"""CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvBertForMaskedLM""",
"""ConvBertForMultipleChoice""",
"""ConvBertForQuestionAnswering""",
"""ConvBertForSequenceClassification""",
"""ConvBertForTokenClassification""",
"""ConvBertLayer""",
"""ConvBertModel""",
"""ConvBertPreTrainedModel""",
"""load_tf_weights_in_convbert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"""TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFConvBertForMaskedLM""",
"""TFConvBertForMultipleChoice""",
"""TFConvBertForQuestionAnswering""",
"""TFConvBertForSequenceClassification""",
"""TFConvBertForTokenClassification""",
"""TFConvBertLayer""",
"""TFConvBertModel""",
"""TFConvBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 582 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class lowercase__:
'''simple docstring'''
@staticmethod
def UpperCAmelCase ( *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> List[Any]:
"""simple docstring"""
pass
def _lowerCamelCase ( A_ : str ) -> Dict:
'''simple docstring'''
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
__UpperCAmelCase = (
"""https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"""
)
@is_pipeline_test
@require_torch
@require_vision
class lowercase__( unittest.TestCase ):
'''simple docstring'''
snake_case__ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : List[Any] =pipeline(
"document-question-answering" , model=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : int =INVOICE_URL
UpperCamelCase__ : Tuple =list(zip(*apply_tesseract(load_image(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE , "")))
UpperCamelCase__ : int ="What is the placebo?"
UpperCamelCase__ : Optional[Any] =[
{
"image": load_image(__SCREAMING_SNAKE_CASE),
"question": question,
},
{
"image": image,
"question": question,
},
{
"image": image,
"question": question,
"word_boxes": word_boxes,
},
]
return dqa_pipeline, examples
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) -> int:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] =dqa_pipeline(__SCREAMING_SNAKE_CASE , top_k=2)
self.assertEqual(
__SCREAMING_SNAKE_CASE , [
[
{"score": ANY(__SCREAMING_SNAKE_CASE), "answer": ANY(__SCREAMING_SNAKE_CASE), "start": ANY(__SCREAMING_SNAKE_CASE), "end": ANY(__SCREAMING_SNAKE_CASE)},
{"score": ANY(__SCREAMING_SNAKE_CASE), "answer": ANY(__SCREAMING_SNAKE_CASE), "start": ANY(__SCREAMING_SNAKE_CASE), "end": ANY(__SCREAMING_SNAKE_CASE)},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def UpperCAmelCase ( self) -> Any:
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] =pipeline("document-question-answering" , model="hf-internal-testing/tiny-random-layoutlmv2")
UpperCamelCase__ : Tuple =INVOICE_URL
UpperCamelCase__ : Dict ="How many cats are there?"
UpperCamelCase__ : List[Any] =[
{"score": 0.00_01, "answer": "oy 2312/2019", "start": 38, "end": 39},
{"score": 0.00_01, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
]
UpperCamelCase__ : str =dqa_pipeline(image=__SCREAMING_SNAKE_CASE , question=__SCREAMING_SNAKE_CASE , top_k=2)
self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4) , __SCREAMING_SNAKE_CASE)
UpperCamelCase__ : Union[str, Any] =dqa_pipeline({"image": image, "question": question} , top_k=2)
self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4) , __SCREAMING_SNAKE_CASE)
# This image does not detect ANY text in it, meaning layoutlmv2 should fail.
# Empty answer probably
UpperCamelCase__ : Optional[int] ="./tests/fixtures/tests_samples/COCO/000000039769.png"
UpperCamelCase__ : List[str] =dqa_pipeline(image=__SCREAMING_SNAKE_CASE , question=__SCREAMING_SNAKE_CASE , top_k=2)
self.assertEqual(__SCREAMING_SNAKE_CASE , [])
# We can optionnally pass directly the words and bounding boxes
UpperCamelCase__ : Any ="./tests/fixtures/tests_samples/COCO/000000039769.png"
UpperCamelCase__ : Tuple =[]
UpperCamelCase__ : Union[str, Any] =[]
UpperCamelCase__ : Optional[int] =dqa_pipeline(image=__SCREAMING_SNAKE_CASE , question=__SCREAMING_SNAKE_CASE , words=__SCREAMING_SNAKE_CASE , boxes=__SCREAMING_SNAKE_CASE , top_k=2)
self.assertEqual(__SCREAMING_SNAKE_CASE , [])
@slow
@require_torch
@require_detectrona
@require_pytesseract
def UpperCAmelCase ( self) -> Tuple:
"""simple docstring"""
UpperCamelCase__ : List[str] =pipeline(
"document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , )
UpperCamelCase__ : Dict =INVOICE_URL
UpperCamelCase__ : List[Any] ="What is the invoice number?"
UpperCamelCase__ : Any =dqa_pipeline(image=__SCREAMING_SNAKE_CASE , question=__SCREAMING_SNAKE_CASE , top_k=2)
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4) , [
{"score": 0.99_44, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.00_09, "answer": "us-001", "start": 16, "end": 16},
] , )
UpperCamelCase__ : Dict =dqa_pipeline({"image": image, "question": question} , top_k=2)
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4) , [
{"score": 0.99_44, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.00_09, "answer": "us-001", "start": 16, "end": 16},
] , )
UpperCamelCase__ : List[Any] =dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2)
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4) , [
[
{"score": 0.99_44, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.00_09, "answer": "us-001", "start": 16, "end": 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def UpperCAmelCase ( self) -> int:
"""simple docstring"""
UpperCamelCase__ : List[Any] =pipeline(
"document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , max_seq_len=50 , )
UpperCamelCase__ : Tuple =INVOICE_URL
UpperCamelCase__ : Dict ="What is the invoice number?"
UpperCamelCase__ : List[Any] =dqa_pipeline(image=__SCREAMING_SNAKE_CASE , question=__SCREAMING_SNAKE_CASE , top_k=2)
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4) , [
{"score": 0.99_74, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.99_48, "answer": "us-001", "start": 16, "end": 16},
] , )
UpperCamelCase__ : Optional[int] =dqa_pipeline({"image": image, "question": question} , top_k=2)
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4) , [
{"score": 0.99_74, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.99_48, "answer": "us-001", "start": 16, "end": 16},
] , )
UpperCamelCase__ : List[str] =dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2)
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4) , [
[
{"score": 0.99_74, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.99_48, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def UpperCAmelCase ( self) -> Tuple:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] =AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : Tuple =pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=__SCREAMING_SNAKE_CASE , revision="3dc6de3" , )
UpperCamelCase__ : List[str] =INVOICE_URL
UpperCamelCase__ : Tuple ="What is the invoice number?"
UpperCamelCase__ : Union[str, Any] =dqa_pipeline(image=__SCREAMING_SNAKE_CASE , question=__SCREAMING_SNAKE_CASE , top_k=2)
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4) , [
{"score": 0.42_51, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.08_19, "answer": "1110212019", "start": 23, "end": 23},
] , )
UpperCamelCase__ : Optional[int] =dqa_pipeline({"image": image, "question": question} , top_k=2)
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4) , [
{"score": 0.42_51, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.08_19, "answer": "1110212019", "start": 23, "end": 23},
] , )
UpperCamelCase__ : Optional[int] =dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2)
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4) , [
[
{"score": 0.42_51, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.08_19, "answer": "1110212019", "start": 23, "end": 23},
]
]
* 2 , )
UpperCamelCase__ : Optional[Any] =list(zip(*apply_tesseract(load_image(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE , "")))
# This model should also work if `image` is set to None
UpperCamelCase__ : List[Any] =dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2)
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4) , [
{"score": 0.42_51, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.08_19, "answer": "1110212019", "start": 23, "end": 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def UpperCAmelCase ( self) -> str:
"""simple docstring"""
UpperCamelCase__ : List[Any] =AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : List[Any] =pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=__SCREAMING_SNAKE_CASE , revision="3dc6de3" , max_seq_len=50 , )
UpperCamelCase__ : str =INVOICE_URL
UpperCamelCase__ : List[str] ="What is the invoice number?"
UpperCamelCase__ : Union[str, Any] =dqa_pipeline(image=__SCREAMING_SNAKE_CASE , question=__SCREAMING_SNAKE_CASE , top_k=2)
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4) , [
{"score": 0.99_99, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.99_98, "answer": "us-001", "start": 16, "end": 16},
] , )
UpperCamelCase__ : int =dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2)
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4) , [
[
{"score": 0.99_99, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.99_98, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
UpperCamelCase__ : List[Any] =list(zip(*apply_tesseract(load_image(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE , "")))
# This model should also work if `image` is set to None
UpperCamelCase__ : Optional[Any] =dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2)
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4) , [
{"score": 0.99_99, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.99_98, "answer": "us-001", "start": 16, "end": 16},
] , )
@slow
@require_torch
def UpperCAmelCase ( self) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ : List[Any] =pipeline(
"document-question-answering" , model="naver-clova-ix/donut-base-finetuned-docvqa" , tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa") , feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa" , )
UpperCamelCase__ : Optional[Any] =INVOICE_URL
UpperCamelCase__ : Dict ="What is the invoice number?"
UpperCamelCase__ : Any =dqa_pipeline(image=__SCREAMING_SNAKE_CASE , question=__SCREAMING_SNAKE_CASE , top_k=2)
self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4) , [{"answer": "us-001"}])
@require_tf
@unittest.skip("Document question answering not implemented in TF")
def UpperCAmelCase ( self) -> Optional[int]:
"""simple docstring"""
pass
| 582 | 1 |
'''simple docstring'''
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _SCREAMING_SNAKE_CASE ( __a ,__a ,__a ):
__SCREAMING_SNAKE_CASE :Tuple = [r"""h\.\d+\.attn\.bias""", r"""h\.\d+\.attn\.masked_bias"""]
@register_to_config
def __init__( self : List[str] , a__ : int , a__ : int , a__ : Optional[int] = None , a__ : int = 5_0257 , a__ : int = 1024 , a__ : int = 768 , a__ : int = 12 , a__ : int = 12 , a__ : Optional[int] = None , a__ : str = "gelu_new" , a__ : float = 0.1 , a__ : float = 0.1 , a__ : float = 0.1 , a__ : float = 1E-5 , a__ : float = 0.02 , a__ : bool = True , a__ : bool = True , a__ : bool = False , a__ : bool = False , ):
super().__init__()
__magic_name__ = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
F'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and'''
F''' `n_embd`: {n_embd} are not equal.''' )
__magic_name__ = prefix_inner_dim
__magic_name__ = prefix_hidden_dim
__magic_name__ = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
__magic_name__ = (
nn.Linear(self.prefix_hidden_dim , a__ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
__magic_name__ = GPTaConfig(
vocab_size=a__ , n_positions=a__ , n_embd=a__ , n_layer=a__ , n_head=a__ , n_inner=a__ , activation_function=a__ , resid_pdrop=a__ , embd_pdrop=a__ , attn_pdrop=a__ , layer_norm_epsilon=a__ , initializer_range=a__ , scale_attn_weights=a__ , use_cache=a__ , scale_attn_by_inverse_layer_idx=a__ , reorder_and_upcast_attn=a__ , )
__magic_name__ = GPTaLMHeadModel(a__ )
def snake_case__ ( self : int , a__ : torch.Tensor , a__ : torch.Tensor , a__ : Optional[torch.Tensor] = None , a__ : Optional[torch.Tensor] = None , ):
__magic_name__ = self.transformer.transformer.wte(a__ )
__magic_name__ = self.encode_prefix(a__ )
__magic_name__ = self.decode_prefix(a__ )
__magic_name__ = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
__magic_name__ = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
__magic_name__ = torch.cat((dummy_token, input_ids) , dim=1 )
__magic_name__ = self.transformer(inputs_embeds=a__ , labels=a__ , attention_mask=a__ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def snake_case__ ( self : Union[str, Any] , a__ : int , a__ : torch.device ):
return torch.zeros(a__ , self.prefix_length , dtype=torch.intaa , device=a__ )
def snake_case__ ( self : Optional[int] , a__ : Optional[int] ):
return self.encode_prefix(a__ )
@torch.no_grad()
def snake_case__ ( self : Union[str, Any] , a__ : List[str] , a__ : Dict , a__ : List[str] ):
__magic_name__ = torch.split(a__ , 1 , dim=0 )
__magic_name__ = []
__magic_name__ = []
for feature in features:
__magic_name__ = self.decode_prefix(feature.to(a__ ) ) # back to the clip feature
# Only support beam search for now
__magic_name__ , __magic_name__ = self.generate_beam(
input_embeds=a__ , device=a__ , eos_token_id=a__ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
__magic_name__ = torch.stack(a__ )
__magic_name__ = torch.stack(a__ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def snake_case__ ( self : List[str] , a__ : List[Any]=None , a__ : List[str]=None , a__ : Dict=None , a__ : int = 5 , a__ : int = 67 , a__ : float = 1.0 , a__ : Optional[int] = None , ):
__magic_name__ = eos_token_id
__magic_name__ = None
__magic_name__ = None
__magic_name__ = torch.ones(a__ , device=a__ , dtype=torch.int )
__magic_name__ = torch.zeros(a__ , device=a__ , dtype=torch.bool )
if input_embeds is not None:
__magic_name__ = input_embeds
else:
__magic_name__ = self.transformer.transformer.wte(a__ )
for i in range(a__ ):
__magic_name__ = self.transformer(inputs_embeds=a__ )
__magic_name__ = outputs.logits
__magic_name__ = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
__magic_name__ = logits.softmax(-1 ).log()
if scores is None:
__magic_name__ , __magic_name__ = logits.topk(a__ , -1 )
__magic_name__ = generated.expand(a__ , *generated.shape[1:] )
__magic_name__ , __magic_name__ = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
__magic_name__ = next_tokens
else:
__magic_name__ = tokens.expand(a__ , *tokens.shape[1:] )
__magic_name__ = torch.cat((tokens, next_tokens) , dim=1 )
else:
__magic_name__ = -float(np.inf )
__magic_name__ = 0
__magic_name__ = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
__magic_name__ = scores_sum / seq_lengths[:, None]
__magic_name__ , __magic_name__ = scores_sum_average.view(-1 ).topk(a__ , -1 )
__magic_name__ = next_tokens // scores_sum.shape[1]
__magic_name__ = seq_lengths[next_tokens_source]
__magic_name__ = next_tokens % scores_sum.shape[1]
__magic_name__ = next_tokens.unsqueeze(1 )
__magic_name__ = tokens[next_tokens_source]
__magic_name__ = torch.cat((tokens, next_tokens) , dim=1 )
__magic_name__ = generated[next_tokens_source]
__magic_name__ = scores_sum_average * seq_lengths
__magic_name__ = is_stopped[next_tokens_source]
__magic_name__ = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
__magic_name__ = torch.cat((generated, next_token_embed) , dim=1 )
__magic_name__ = is_stopped + next_tokens.eq(a__ ).squeeze()
if is_stopped.all():
break
__magic_name__ = scores / seq_lengths
__magic_name__ = scores.argsort(descending=a__ )
# tokens tensors are already padded to max_seq_length
__magic_name__ = [tokens[i] for i in order]
__magic_name__ = torch.stack(a__ , dim=0 )
__magic_name__ = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
| 432 |
'''simple docstring'''
_lowerCAmelCase = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
_lowerCAmelCase = [{"type": "code", "content": INSTALL_CONTENT}]
_lowerCAmelCase = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 432 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase =logging.get_logger(__name__)
def __a ( A , A=False , A=False ) -> Optional[int]:
'''simple docstring'''
A__ = "backbone." if is_semantic else ""
A__ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""{prefix}blocks.{i}.norm1.weight""", f"""beit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm1.bias""", f"""beit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""{prefix}blocks.{i}.attn.proj.weight""", f"""beit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""{prefix}blocks.{i}.attn.proj.bias""", f"""beit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm2.weight""", f"""beit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm2.bias""", f"""beit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.weight""", f"""beit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.bias""", f"""beit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.weight""", f"""beit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.bias""", f"""beit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
(f"""{prefix}cls_token""", "beit.embeddings.cls_token"),
(f"""{prefix}patch_embed.proj.weight""", "beit.embeddings.patch_embeddings.projection.weight"),
(f"""{prefix}patch_embed.proj.bias""", "beit.embeddings.patch_embeddings.projection.bias"),
(f"""{prefix}pos_embed""", "beit.embeddings.position_embeddings"),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("mask_token", "beit.embeddings.mask_token"),
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("fc_norm.weight", "beit.pooler.layernorm.weight"),
("fc_norm.bias", "beit.pooler.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def __a ( A , A , A=False , A=False ) -> Any:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
A__ = "backbone." if is_semantic else ""
# queries, keys and values
A__ = state_dict.pop(f"""{prefix}blocks.{i}.attn.qkv.weight""" )
A__ = state_dict.pop(f"""{prefix}blocks.{i}.attn.q_bias""" )
A__ = state_dict.pop(f"""{prefix}blocks.{i}.attn.v_bias""" )
A__ = in_proj_weight[
: config.hidden_size, :
]
A__ = q_bias
A__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A__ = in_proj_weight[
-config.hidden_size :, :
]
A__ = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
A__ = state_dict.pop(f"""{prefix}blocks.{i}.gamma_1""" )
A__ = state_dict.pop(f"""{prefix}blocks.{i}.gamma_2""" )
A__ = gamma_a
A__ = gamma_a
def __a ( A , A , A ) -> Tuple:
'''simple docstring'''
A__ = dct.pop(A )
A__ = val
def __a ( ) -> Dict:
'''simple docstring'''
A__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
A__ = Image.open(requests.get(A , stream=A ).raw )
return im
@torch.no_grad()
def __a ( A , A , A=False ) -> Optional[int]:
'''simple docstring'''
A__ = False if "rvlcdip" in checkpoint_url else True
A__ = BeitConfig(use_absolute_position_embeddings=A , use_mask_token=A )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
A__ = 1_024
A__ = 4_096
A__ = 24
A__ = 16
# labels
if "rvlcdip" in checkpoint_url:
A__ = 16
A__ = "huggingface/label-files"
A__ = "rvlcdip-id2label.json"
A__ = json.load(open(hf_hub_download(A , A , repo_type="dataset" ) , "r" ) )
A__ = {int(A ): v for k, v in idalabel.items()}
A__ = idalabel
A__ = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
A__ = torch.hub.load_state_dict_from_url(A , map_location="cpu" )["model"]
A__ = create_rename_keys(A , has_lm_head=A )
for src, dest in rename_keys:
rename_key(A , A , A )
read_in_q_k_v(A , A , has_lm_head=A )
# load HuggingFace model
A__ = BeitForMaskedImageModeling(A ) if has_lm_head else BeitForImageClassification(A )
model.eval()
model.load_state_dict(A )
# Check outputs on an image
A__ = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=A )
A__ = prepare_img()
A__ = image_processor(images=A , return_tensors="pt" )
A__ = encoding["pixel_values"]
A__ = model(A )
A__ = outputs.logits
# verify logits
A__ = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8_192]
assert logits.shape == torch.Size(A ), "Shape of logits not as expected"
Path(A ).mkdir(exist_ok=A )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(A )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(A )
if push_to_hub:
if has_lm_head:
A__ = "dit-base" if "base" in checkpoint_url else "dit-large"
else:
A__ = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
image_processor.push_to_hub(
repo_path_or_name=Path(A , A ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=A , )
model.push_to_hub(
repo_path_or_name=Path(A , A ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=A , )
if __name__ == "__main__":
__UpperCAmelCase =argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
__UpperCAmelCase =parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub) | 717 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def lowercase_ ( self ):
'''simple docstring'''
A__ = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base" )
A__ = {
"input_ids": tf.convert_to_tensor([[0, 26_46, 1_02_69, 83, 9_99_42, 2]] , dtype=tf.intaa ), # "My dog is cute"
"attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ),
}
A__ = model(UpperCamelCase__ )["last_hidden_state"]
A__ = tf.TensorShape((1, 6, 7_68) )
self.assertEqual(output.shape , UpperCamelCase__ )
# compare the actual values for a slice.
A__ = tf.convert_to_tensor(
[
[
[0.068_1762, 0.1089_4451, 0.0677_2504],
[-0.0642_3668, 0.0236_6615, 0.0432_9344],
[-0.0605_7295, 0.0997_4135, -0.0007_0584],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) ) | 261 | 0 |
"""simple docstring"""
from math import factorial
def SCREAMING_SNAKE_CASE__ ( snake_case : int = 100 )-> int:
'''simple docstring'''
return sum(int(lowerCamelCase_ ) for x in str(factorial(lowerCamelCase_ ) ) )
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
| 438 |
import os
import sys
import unittest
UpperCamelCase__ : Optional[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
UpperCamelCase__ : Tuple = os.path.join(git_repo_path, '''src''', '''transformers''')
UpperCamelCase__ : List[Any] = '''
{0} = None
'''
UpperCamelCase__ : str = '''
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
'''
UpperCamelCase__ : List[Any] = '''
def {0}(*args, **kwargs):
requires_backends({0}, {1})
'''
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = find_backend(' _import_structure["models.albert"].append("AlbertTokenizerFast")' )
self.assertIsNone(snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = find_backend(' if not is_tokenizers_available():' )
self.assertEqual(snake_case__ ,'tokenizers' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = find_backend(' if not is_tensorflow_text_available():' )
self.assertEqual(snake_case__ ,'tensorflow_text' )
SCREAMING_SNAKE_CASE_ : int = find_backend(' if not (is_sentencepiece_available() and is_tokenizers_available()):' )
self.assertEqual(snake_case__ ,'sentencepiece_and_tokenizers' )
SCREAMING_SNAKE_CASE_ : str = find_backend(
' if not (is_sentencepiece_available() and is_tensorflow_text_available()):' )
self.assertEqual(snake_case__ ,'sentencepiece_and_tensorflow_text' )
SCREAMING_SNAKE_CASE_ : Tuple = find_backend(
' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):' )
self.assertEqual(snake_case__ ,'sentencepiece_and_tokenizers_and_vision' )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('torch' ,snake_case__ )
self.assertIn('tensorflow_text' ,snake_case__ )
self.assertIn('sentencepiece_and_tokenizers' ,snake_case__ )
# Likewise, we can't assert on the exact content of a key
self.assertIn('BertModel' ,objects['torch'] )
self.assertIn('TFBertModel' ,objects['tf'] )
self.assertIn('FlaxBertModel' ,objects['flax'] )
self.assertIn('BertModel' ,objects['torch'] )
self.assertIn('TFBertTokenizer' ,objects['tensorflow_text'] )
self.assertIn('convert_slow_tokenizer' ,objects['sentencepiece_and_tokenizers'] )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = create_dummy_object('CONSTANT' ,'\'torch\'' )
self.assertEqual(snake_case__ ,'\nCONSTANT = None\n' )
SCREAMING_SNAKE_CASE_ : Dict = create_dummy_object('function' ,'\'torch\'' )
self.assertEqual(
snake_case__ ,'\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' )
SCREAMING_SNAKE_CASE_ : List[str] = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n'
SCREAMING_SNAKE_CASE_ : Dict = create_dummy_object('FakeClass' ,'\'torch\'' )
self.assertEqual(snake_case__ ,snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n'
SCREAMING_SNAKE_CASE_ : Dict = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} )
self.assertEqual(dummy_files['torch'] ,snake_case__ )
| 105 | 0 |
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase ( snake_case_ ,unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = XLMTokenizer
SCREAMING_SNAKE_CASE__ = False
def __lowerCAmelCase ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCAmelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
_lowerCAmelCase = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
_lowerCAmelCase = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
_lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) )
with open(self.merges_file , '''w''' ) as fp:
fp.write('''\n'''.join(_lowerCAmelCase ) )
def __lowerCAmelCase ( self , _lowerCAmelCase ):
_lowerCAmelCase = '''lower newer'''
_lowerCAmelCase = '''lower newer'''
return input_text, output_text
def __lowerCAmelCase ( self ):
_lowerCAmelCase = XLMTokenizer(self.vocab_file , self.merges_file )
_lowerCAmelCase = '''lower'''
_lowerCAmelCase = ['''low''', '''er</w>''']
_lowerCAmelCase = tokenizer.tokenize(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = tokens + ['''<unk>''']
_lowerCAmelCase = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , _lowerCAmelCase )
@slow
def __lowerCAmelCase ( self ):
_lowerCAmelCase = XLMTokenizer.from_pretrained('''xlm-mlm-en-2048''' )
_lowerCAmelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=_lowerCAmelCase )
_lowerCAmelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_lowerCAmelCase )
_lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase )
_lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase , _lowerCAmelCase )
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1] | 664 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase_ = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["DeiTFeatureExtractor"]
UpperCAmelCase_ = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DeiTForImageClassification",
"DeiTForImageClassificationWithTeacher",
"DeiTForMaskedImageModeling",
"DeiTModel",
"DeiTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDeiTForImageClassification",
"TFDeiTForImageClassificationWithTeacher",
"TFDeiTForMaskedImageModeling",
"TFDeiTModel",
"TFDeiTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 664 | 1 |
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class lowerCAmelCase_ :
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=13, SCREAMING_SNAKE_CASE_=30, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=32, SCREAMING_SNAKE_CASE_=5, SCREAMING_SNAKE_CASE_=4, SCREAMING_SNAKE_CASE_=37, SCREAMING_SNAKE_CASE_="gelu", SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=10, SCREAMING_SNAKE_CASE_=0.02, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=2, ) -> Any:
UpperCamelCase : Dict = parent
UpperCamelCase : Optional[int] = batch_size
UpperCamelCase : Tuple = image_size
UpperCamelCase : List[Any] = patch_size
UpperCamelCase : Union[str, Any] = num_channels
UpperCamelCase : Union[str, Any] = is_training
UpperCamelCase : Any = use_labels
UpperCamelCase : Union[str, Any] = hidden_size
UpperCamelCase : List[Any] = num_hidden_layers
UpperCamelCase : Union[str, Any] = num_attention_heads
UpperCamelCase : Optional[Any] = intermediate_size
UpperCamelCase : str = hidden_act
UpperCamelCase : Tuple = hidden_dropout_prob
UpperCamelCase : int = attention_probs_dropout_prob
UpperCamelCase : Optional[Any] = type_sequence_label_size
UpperCamelCase : Optional[int] = initializer_range
UpperCamelCase : Optional[Any] = scope
UpperCamelCase : List[Any] = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
UpperCamelCase : Union[str, Any] = (image_size // patch_size) ** 2
UpperCamelCase : str = num_patches + 2
def snake_case_ ( self ) -> Tuple:
UpperCamelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase : str = None
if self.use_labels:
UpperCamelCase : Optional[int] = ids_tensor([self.batch_size], self.type_sequence_label_size )
UpperCamelCase : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def snake_case_ ( self ) -> Optional[int]:
return DeiTConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=SCREAMING_SNAKE_CASE_, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCamelCase : int = DeiTModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : Any = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Tuple:
UpperCamelCase : Optional[int] = DeiTForMaskedImageModeling(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : List[str] = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCamelCase : Union[str, Any] = 1
UpperCamelCase : Tuple = DeiTForMaskedImageModeling(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size) )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> List[Any]:
UpperCamelCase : int = self.type_sequence_label_size
UpperCamelCase : Dict = DeiTForImageClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : Dict = model(SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase : str = 1
UpperCamelCase : List[Any] = DeiTForImageClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase : List[str] = model(SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def snake_case_ ( self ) -> Dict:
UpperCamelCase : str = self.prepare_config_and_inputs()
(
(
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) ,
) : Dict = config_and_inputs
UpperCamelCase : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( a__ , a__ , unittest.TestCase ):
UpperCAmelCase__ : Optional[Any] = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
UpperCAmelCase__ : Optional[int] = (
{
"feature-extraction": DeiTModel,
"image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Dict = False
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : Optional[int] = False
def snake_case_ ( self ) -> Optional[int]:
UpperCamelCase : str = DeiTModelTester(self )
UpperCamelCase : str = ConfigTester(self, config_class=SCREAMING_SNAKE_CASE_, has_text_modality=SCREAMING_SNAKE_CASE_, hidden_size=37 )
def snake_case_ ( self ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds' )
def snake_case_ ( self ) -> str:
pass
def snake_case_ ( self ) -> List[Any]:
UpperCamelCase , UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : List[Any] = model_class(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
UpperCamelCase : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_, nn.Linear ) )
def snake_case_ ( self ) -> Optional[int]:
UpperCamelCase , UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase : List[str] = [*signature.parameters.keys()]
UpperCamelCase : Any = ['pixel_values']
self.assertListEqual(arg_names[:1], SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> str:
UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Dict:
UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Dict:
UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=False ) -> Optional[Any]:
UpperCamelCase : str = super()._prepare_for_class(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, return_labels=SCREAMING_SNAKE_CASE_ )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def snake_case_ ( self ) -> str:
if not self.model_tester.is_training:
return
UpperCamelCase , UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase : int = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(SCREAMING_SNAKE_CASE_ )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
UpperCamelCase : List[Any] = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.train()
UpperCamelCase : Dict = self._prepare_for_class(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, return_labels=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = model(**SCREAMING_SNAKE_CASE_ ).loss
loss.backward()
def snake_case_ ( self ) -> Dict:
UpperCamelCase , UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCamelCase : Tuple = False
UpperCamelCase : int = True
for model_class in self.all_model_classes:
if model_class in get_values(SCREAMING_SNAKE_CASE_ ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
UpperCamelCase : str = model_class(SCREAMING_SNAKE_CASE_ )
model.gradient_checkpointing_enable()
model.to(SCREAMING_SNAKE_CASE_ )
model.train()
UpperCamelCase : Any = self._prepare_for_class(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, return_labels=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = model(**SCREAMING_SNAKE_CASE_ ).loss
loss.backward()
def snake_case_ ( self ) -> Optional[int]:
UpperCamelCase , UpperCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase : List[Any] = [
{'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
{'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
{'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(SCREAMING_SNAKE_CASE_ ),
*get_values(SCREAMING_SNAKE_CASE_ ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"""Testing {model_class} with {problem_type["title"]}""" ):
UpperCamelCase : Dict = problem_type['title']
UpperCamelCase : Optional[int] = problem_type['num_labels']
UpperCamelCase : Dict = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.train()
UpperCamelCase : Optional[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, return_labels=SCREAMING_SNAKE_CASE_ )
if problem_type["num_labels"] > 1:
UpperCamelCase : int = inputs['labels'].unsqueeze(1 ).repeat(1, problem_type['num_labels'] )
UpperCamelCase : Tuple = inputs['labels'].to(problem_type['dtype'] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=SCREAMING_SNAKE_CASE_ ) as warning_list:
UpperCamelCase : List[Any] = model(**SCREAMING_SNAKE_CASE_ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def snake_case_ ( self ) -> Union[str, Any]:
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : Tuple = DeiTModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase ( ) -> List[str]:
UpperCamelCase : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
@cached_property
def snake_case_ ( self ) -> List[str]:
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' )
if is_vision_available()
else None
)
@slow
def snake_case_ ( self ) -> Union[str, Any]:
UpperCamelCase : Tuple = DeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' ).to(
SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = self.default_image_processor
UpperCamelCase : List[Any] = prepare_img()
UpperCamelCase : Tuple = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
UpperCamelCase : int = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
UpperCamelCase : Dict = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape, SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = torch.tensor([-1.02_66, 0.19_12, -1.28_61] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3], SCREAMING_SNAKE_CASE_, atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def snake_case_ ( self ) -> str:
UpperCamelCase : str = DeiTModel.from_pretrained(
'facebook/deit-base-distilled-patch16-224', torch_dtype=torch.floataa, device_map='auto' )
UpperCamelCase : List[Any] = self.default_image_processor
UpperCamelCase : Optional[int] = prepare_img()
UpperCamelCase : Any = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors='pt' )
UpperCamelCase : Optional[int] = inputs.pixel_values.to(SCREAMING_SNAKE_CASE_ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
UpperCamelCase : List[str] = model(SCREAMING_SNAKE_CASE_ )
| 40 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Any ,A : Optional[int] ,A : Optional[int]=7 ,A : Optional[Any]=3 ,A : List[str]=18 ,A : Any=30 ,A : Tuple=4_00 ,A : Union[str, Any]=True ,A : Optional[Any]=32 ,A : Union[str, Any]=True ,):
__A = parent
__A = batch_size
__A = num_channels
__A = image_size
__A = min_resolution
__A = max_resolution
__A = do_resize
__A = size_divisor
__A = do_rescale
def UpperCamelCase_ ( self : Union[str, Any] ):
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
snake_case_ = GLPNImageProcessor if is_vision_available() else None
def UpperCamelCase_ ( self : int ):
__A = GLPNImageProcessingTester(self )
@property
def UpperCamelCase_ ( self : Optional[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self : Any ):
__A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A ,"do_resize" ) )
self.assertTrue(hasattr(A ,"size_divisor" ) )
self.assertTrue(hasattr(A ,"resample" ) )
self.assertTrue(hasattr(A ,"do_rescale" ) )
def UpperCamelCase_ ( self : str ):
pass
def UpperCamelCase_ ( self : Dict ):
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A ,Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
__A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def UpperCamelCase_ ( self : Optional[Any] ):
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,numpify=A )
for image in image_inputs:
self.assertIsInstance(A ,np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
__A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def UpperCamelCase_ ( self : int ):
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,torchify=A )
for image in image_inputs:
self.assertIsInstance(A ,torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
__A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
| 55 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
A = logging.get_logger(__name__)
A = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
A = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
A = {
"distilbert-base-uncased": 512,
"distilbert-base-uncased-distilled-squad": 512,
"distilbert-base-cased": 512,
"distilbert-base-cased-distilled-squad": 512,
"distilbert-base-german-cased": 512,
"distilbert-base-multilingual-cased": 512,
}
A = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class lowercase__ ( __SCREAMING_SNAKE_CASE ):
A__= VOCAB_FILES_NAMES
A__= PRETRAINED_VOCAB_FILES_MAP
A__= PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__= PRETRAINED_INIT_CONFIGURATION
A__= ['input_ids', 'attention_mask']
A__= DistilBertTokenizer
def __init__( self : List[Any] , _lowercase : Optional[Any]=None , _lowercase : Any=None , _lowercase : Optional[Any]=True , _lowercase : Optional[Any]="[UNK]" , _lowercase : Optional[Any]="[SEP]" , _lowercase : Union[str, Any]="[PAD]" , _lowercase : Optional[Any]="[CLS]" , _lowercase : Optional[Any]="[MASK]" , _lowercase : Optional[Any]=True , _lowercase : Any=None , **_lowercase : str , ):
"""simple docstring"""
super().__init__(
_lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , tokenize_chinese_chars=_lowercase , strip_accents=_lowercase , **_lowercase , )
UpperCAmelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , _lowercase ) != do_lower_case
or normalizer_state.get("strip_accents" , _lowercase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , _lowercase ) != tokenize_chinese_chars
):
UpperCAmelCase__ = getattr(_lowercase , normalizer_state.pop("type" ) )
UpperCAmelCase__ = do_lower_case
UpperCAmelCase__ = strip_accents
UpperCAmelCase__ = tokenize_chinese_chars
UpperCAmelCase__ = normalizer_class(**_lowercase )
UpperCAmelCase__ = do_lower_case
def _UpperCAmelCase ( self : Union[str, Any] , _lowercase : Optional[Any] , _lowercase : int=None ):
"""simple docstring"""
UpperCAmelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _UpperCAmelCase ( self : List[Any] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
"""simple docstring"""
UpperCAmelCase__ = [self.sep_token_id]
UpperCAmelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _UpperCAmelCase ( self : Optional[Any] , _lowercase : str , _lowercase : Optional[str] = None ):
"""simple docstring"""
UpperCAmelCase__ = self._tokenizer.model.save(_lowercase , name=_lowercase )
return tuple(_lowercase )
| 277 |
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
A = logging.get_logger(__name__)
def __UpperCAmelCase ( __A , __A , __A ) -> None:
'''simple docstring'''
UpperCAmelCase__ = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(__A ) == len(__A ), F"""{len(__A )} != {len(__A )}"""
dest_layers.load_state_dict(layers_to_copy.state_dict() )
A = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
A = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def __UpperCAmelCase ( __A , __A ) -> int:
'''simple docstring'''
try:
UpperCAmelCase__ = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
F"""no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"""
F""" {n_student}""" )
return list(range(__A ) )
def __UpperCAmelCase ( __A , __A ) -> List[int]:
'''simple docstring'''
if n_student > n_teacher:
raise ValueError(F"""Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}""" )
elif n_teacher == n_student:
return list(range(__A ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def __UpperCAmelCase ( __A , __A = "student" , __A = None , __A = None , __A=False , __A=None , __A=None , **__A , ) -> Tuple[PreTrainedModel, List[int], List[int]]:
'''simple docstring'''
UpperCAmelCase__ = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
assert (e is not None) or (d is not None), _msg
if isinstance(__A , __A ):
AutoTokenizer.from_pretrained(__A ).save_pretrained(__A ) # purely for convenience
UpperCAmelCase__ = AutoModelForSeqaSeqLM.from_pretrained(__A ).eval()
else:
assert isinstance(__A , __A ), F"""teacher must be a model or string got type {type(__A )}"""
UpperCAmelCase__ = teacher.config.to_diff_dict()
try:
UpperCAmelCase__ , UpperCAmelCase__ = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
UpperCAmelCase__ = teacher_e
if d is None:
UpperCAmelCase__ = teacher_d
init_kwargs.update({"encoder_layers": e, "decoder_layers": d} )
except AttributeError: # T5
if hasattr(teacher.config , "num_encoder_layers" ):
UpperCAmelCase__ , UpperCAmelCase__ = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
UpperCAmelCase__ , UpperCAmelCase__ = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
UpperCAmelCase__ = teacher_e
if d is None:
UpperCAmelCase__ = teacher_d
if hasattr(teacher.config , "num_encoder_layers" ):
init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d} )
else:
init_kwargs.update({"num_layers": e, "num_decoder_layers": d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(__A )
# Copy weights
UpperCAmelCase__ = teacher.config_class(**__A )
UpperCAmelCase__ = AutoModelForSeqaSeqLM.from_config(__A )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
UpperCAmelCase__ = student.load_state_dict(teacher.state_dict() , strict=__A )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
UpperCAmelCase__ , UpperCAmelCase__ = list(range(__A ) ), list(range(__A ) )
logger.info(
F"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"""
F""" {save_path}""" )
student.save_pretrained(__A )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
UpperCAmelCase__ = pick_layers_to_copy(__A , __A )
if d_layers_to_copy is None:
UpperCAmelCase__ = pick_layers_to_copy(__A , __A )
try:
if hasattr(
__A , "prophetnet" ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , __A )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , __A )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , __A )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , __A )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , __A )
copy_layers(teacher.decoder.block , student.decoder.block , __A )
logger.info(
F"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}""" )
UpperCAmelCase__ = {
"teacher_type": teacher.config.model_type,
"copied_encoder_layers": e_layers_to_copy,
"copied_decoder_layers": d_layers_to_copy,
}
student.save_pretrained(__A )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 277 | 1 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {"""vocab_file""": """spiece.model"""}
UpperCAmelCase_ = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
}
}
UpperCAmelCase_ = {
"""albert-base-v1""": 5_1_2,
"""albert-large-v1""": 5_1_2,
"""albert-xlarge-v1""": 5_1_2,
"""albert-xxlarge-v1""": 5_1_2,
"""albert-base-v2""": 5_1_2,
"""albert-large-v2""": 5_1_2,
"""albert-xlarge-v2""": 5_1_2,
"""albert-xxlarge-v2""": 5_1_2,
}
UpperCAmelCase_ = """▁"""
class lowerCamelCase__ ( _A):
"""simple docstring"""
a__ : Tuple = VOCAB_FILES_NAMES
a__ : str = PRETRAINED_VOCAB_FILES_MAP
a__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Optional[Any]="[CLS]" , __lowerCAmelCase : List[Any]="[SEP]" , __lowerCAmelCase : List[str]="<unk>" , __lowerCAmelCase : str="[SEP]" , __lowerCAmelCase : Optional[Any]="<pad>" , __lowerCAmelCase : int="[CLS]" , __lowerCAmelCase : List[str]="[MASK]" , __lowerCAmelCase : Optional[Dict[str, Any]] = None , **__lowerCAmelCase : List[Any] , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
_A = (
AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase , normalized=__lowerCAmelCase )
if isinstance(__lowerCAmelCase , __lowerCAmelCase )
else mask_token
)
_A = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__lowerCAmelCase , remove_space=__lowerCAmelCase , keep_accents=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , )
_A = do_lower_case
_A = remove_space
_A = keep_accents
_A = vocab_file
_A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowerCAmelCase )
@property
def snake_case_ ( self : List[Any] ) -> int:
return len(self.sp_model )
def snake_case_ ( self : Optional[int] ) -> List[str]:
_A = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : str ) -> str:
_A = self.__dict__.copy()
_A = None
return state
def __setstate__( self : Dict , __lowerCAmelCase : Dict ) -> Tuple:
_A = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_A = {}
_A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case_ ( self : str , __lowerCAmelCase : str ) -> Any:
if self.remove_space:
_A = ''' '''.join(inputs.strip().split() )
else:
_A = inputs
_A = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
if not self.keep_accents:
_A = unicodedata.normalize('''NFKD''' , __lowerCAmelCase )
_A = ''''''.join([c for c in outputs if not unicodedata.combining(__lowerCAmelCase )] )
if self.do_lower_case:
_A = outputs.lower()
return outputs
def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : str ) -> List[str]:
_A = self.preprocess_text(__lowerCAmelCase )
_A = self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase )
_A = []
for piece in pieces:
if len(__lowerCAmelCase ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
_A = self.sp_model.EncodeAsPieces(piece[:-1].replace(__lowerCAmelCase , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_A = cur_pieces[1:]
else:
_A = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__lowerCAmelCase )
else:
new_pieces.append(__lowerCAmelCase )
return new_pieces
def snake_case_ ( self : str , __lowerCAmelCase : Optional[int] ) -> Dict:
return self.sp_model.PieceToId(__lowerCAmelCase )
def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : Optional[int] ) -> List[Any]:
return self.sp_model.IdToPiece(__lowerCAmelCase )
def snake_case_ ( self : Dict , __lowerCAmelCase : Optional[Any] ) -> Optional[Any]:
_A = []
_A = ''''''
_A = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__lowerCAmelCase ) + token
_A = True
_A = []
else:
current_sub_tokens.append(__lowerCAmelCase )
_A = False
out_string += self.sp_model.decode(__lowerCAmelCase )
return out_string.strip()
def snake_case_ ( self : Dict , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]:
_A = [self.sep_token_id]
_A = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def snake_case_ ( self : Dict , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None , __lowerCAmelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase )
if token_ids_a is not None:
return [1] + ([0] * len(__lowerCAmelCase )) + [1] + ([0] * len(__lowerCAmelCase )) + [1]
return [1] + ([0] * len(__lowerCAmelCase )) + [1]
def snake_case_ ( self : str , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]:
_A = [self.sep_token_id]
_A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case_ ( self : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_A = os.path.join(
__lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCAmelCase , '''wb''' ) as fi:
_A = self.sp_model.serialized_model_proto()
fi.write(__lowerCAmelCase )
return (out_vocab_file,)
| 2 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class lowerCamelCase__ ( unittest.TestCase):
"""simple docstring"""
def __init__( self : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict=7 , __lowerCAmelCase : Tuple=3 , __lowerCAmelCase : int=30 , __lowerCAmelCase : Dict=4_00 , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Dict=True , __lowerCAmelCase : Optional[Any]=[0.5, 0.5, 0.5] , __lowerCAmelCase : Dict=[0.5, 0.5, 0.5] , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : List[str]=1 / 2_55 , __lowerCAmelCase : int=True , ) -> List[str]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
_A = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33}
_A = parent
_A = batch_size
_A = num_channels
_A = min_resolution
_A = max_resolution
_A = do_resize
_A = size
_A = do_normalize
_A = image_mean
_A = image_std
_A = do_rescale
_A = rescale_factor
_A = do_pad
def snake_case_ ( self : Optional[int] ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def snake_case_ ( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : str=False ) -> Dict:
if not batched:
_A = image_inputs[0]
if isinstance(__lowerCAmelCase , Image.Image ):
_A , _A = image.size
else:
_A , _A = image.shape[1], image.shape[2]
if w < h:
_A = int(self.size['''shortest_edge'''] * h / w )
_A = self.size['''shortest_edge''']
elif w > h:
_A = self.size['''shortest_edge''']
_A = int(self.size['''shortest_edge'''] * w / h )
else:
_A = self.size['''shortest_edge''']
_A = self.size['''shortest_edge''']
else:
_A = []
for image in image_inputs:
_A , _A = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_A = max(__lowerCAmelCase , key=lambda __lowerCAmelCase : item[0] )[0]
_A = max(__lowerCAmelCase , key=lambda __lowerCAmelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowerCamelCase__ ( _A , unittest.TestCase):
"""simple docstring"""
a__ : Any = DeformableDetrImageProcessor if is_vision_available() else None
def snake_case_ ( self : Optional[int] ) -> Any:
_A = DeformableDetrImageProcessingTester(self )
@property
def snake_case_ ( self : Union[str, Any] ) -> Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case_ ( self : Optional[int] ) -> List[str]:
_A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''image_std''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_rescale''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_pad''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''size''' ) )
def snake_case_ ( self : List[str] ) -> int:
_A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} )
self.assertEqual(image_processor.do_pad , __lowerCAmelCase )
_A = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__lowerCAmelCase )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , __lowerCAmelCase )
def snake_case_ ( self : Any ) -> Union[str, Any]:
pass
def snake_case_ ( self : List[str] ) -> Optional[int]:
# Initialize image_processing
_A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , Image.Image )
# Test not batched input
_A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase )
_A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case_ ( self : Tuple ) -> int:
# Initialize image_processing
_A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , np.ndarray )
# Test not batched input
_A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
_A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case_ ( self : Optional[Any] ) -> int:
# Initialize image_processing
_A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , torch.Tensor )
# Test not batched input
_A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
_A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def snake_case_ ( self : Optional[Any] ) -> Optional[int]:
# prepare image and target
_A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
_A = json.loads(f.read() )
_A = {'''image_id''': 3_97_69, '''annotations''': target}
# encode them
_A = DeformableDetrImageProcessor()
_A = image_processing(images=__lowerCAmelCase , annotations=__lowerCAmelCase , return_tensors='''pt''' )
# verify pixel values
_A = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , __lowerCAmelCase )
_A = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCAmelCase , atol=1E-4 ) )
# verify area
_A = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCAmelCase ) )
# verify boxes
_A = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCAmelCase )
_A = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCAmelCase , atol=1E-3 ) )
# verify image_id
_A = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCAmelCase ) )
# verify is_crowd
_A = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCAmelCase ) )
# verify class_labels
_A = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCAmelCase ) )
# verify orig_size
_A = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCAmelCase ) )
# verify size
_A = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCAmelCase ) )
@slow
def snake_case_ ( self : List[str] ) -> List[str]:
# prepare image, target and masks_path
_A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
_A = json.loads(f.read() )
_A = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target}
_A = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
_A = DeformableDetrImageProcessor(format='''coco_panoptic''' )
_A = image_processing(images=__lowerCAmelCase , annotations=__lowerCAmelCase , masks_path=__lowerCAmelCase , return_tensors='''pt''' )
# verify pixel values
_A = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , __lowerCAmelCase )
_A = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCAmelCase , atol=1E-4 ) )
# verify area
_A = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCAmelCase ) )
# verify boxes
_A = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCAmelCase )
_A = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCAmelCase , atol=1E-3 ) )
# verify image_id
_A = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCAmelCase ) )
# verify is_crowd
_A = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCAmelCase ) )
# verify class_labels
_A = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCAmelCase ) )
# verify masks
_A = 82_28_73
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __lowerCAmelCase )
# verify orig_size
_A = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCAmelCase ) )
# verify size
_A = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCAmelCase ) )
| 2 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCamelCase : List[Any] = {
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : str = ['''WhisperTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Tuple = [
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : List[Any] = [
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Dict = [
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
__lowerCamelCase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 700 | from __future__ import annotations
from scipy.special import comb # type: ignore
class a__ :
def __init__( self : Union[str, Any],_A : list[tuple[float, float]] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
SCREAMING_SNAKE_CASE_ : List[Any] = len(_A ) - 1
def __UpperCamelCase ( self : Any,_A : float ):
"""simple docstring"""
assert 0 <= t <= 1, "Time t must be between 0 and 1."
SCREAMING_SNAKE_CASE_ : list[float] = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree,_A ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(_A ),5 ) == 1
return output_values
def __UpperCamelCase ( self : str,_A : float ):
"""simple docstring"""
assert 0 <= t <= 1, "Time t must be between 0 and 1."
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.basis_function(_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = 0.0
SCREAMING_SNAKE_CASE_ : List[str] = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def __UpperCamelCase ( self : Any,_A : float = 0.01 ):
"""simple docstring"""
from matplotlib import pyplot as plt # type: ignore
SCREAMING_SNAKE_CASE_ : list[float] = [] # x coordinates of points to plot
SCREAMING_SNAKE_CASE_ : list[float] = [] # y coordinates of points to plot
SCREAMING_SNAKE_CASE_ : Tuple = 0.0
while t <= 1:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.bezier_curve_function(_A )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
SCREAMING_SNAKE_CASE_ : Tuple = [i[0] for i in self.list_of_points]
SCREAMING_SNAKE_CASE_ : Tuple = [i[1] for i in self.list_of_points]
plt.plot(
_A,_A,color="blue",label="Curve of Degree " + str(self.degree ),)
plt.scatter(_A,_A,color="red",label="Control Points" )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 316 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
SCREAMING_SNAKE_CASE_ = (3, 9, -11, 0, 7, 5, 1, -1)
SCREAMING_SNAKE_CASE_ = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class snake_case_ :
"""simple docstring"""
A_ = 42
A_ = 42
class snake_case_ :
"""simple docstring"""
def __init__( self , lowerCamelCase_) -> None:
UpperCamelCase = None
for i in sorted(_SCREAMING_SNAKE_CASE , reverse=_SCREAMING_SNAKE_CASE):
UpperCamelCase = Node(_SCREAMING_SNAKE_CASE , self.head)
def __iter__( self) -> Iterator[int]:
UpperCamelCase = self.head
while node:
yield node.data
UpperCamelCase = node.next_node
def __len__( self) -> int:
return sum(1 for _ in self)
def __str__( self) -> str:
return " -> ".join([str(_SCREAMING_SNAKE_CASE) for node in self])
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
return SortedLinkedList(list(_lowercase ) + list(_lowercase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE_ = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even))) | 34 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"""microsoft/trocr-base-handwritten""": (
"""https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"""
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class _lowerCamelCase ( UpperCamelCase ):
"""simple docstring"""
snake_case = "trocr"
snake_case = ["past_key_values"]
snake_case = {
"num_attention_heads": "decoder_attention_heads",
"hidden_size": "d_model",
"num_hidden_layers": "decoder_layers",
}
def __init__( self , _SCREAMING_SNAKE_CASE=5_0265 , _SCREAMING_SNAKE_CASE=1024 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=4096 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=2 , **_SCREAMING_SNAKE_CASE , )->str:
'''simple docstring'''
A_ : List[str] = vocab_size
A_ : Tuple = d_model
A_ : List[Any] = decoder_layers
A_ : Optional[int] = decoder_attention_heads
A_ : Any = decoder_ffn_dim
A_ : Union[str, Any] = activation_function
A_ : str = max_position_embeddings
A_ : Dict = dropout
A_ : int = attention_dropout
A_ : Optional[int] = activation_dropout
A_ : int = init_std
A_ : Any = decoder_layerdrop
A_ : int = use_cache
A_ : int = scale_embedding
A_ : Optional[int] = use_learned_position_embeddings
A_ : str = layernorm_embedding
super().__init__(
pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , decoder_start_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
| 590 | 0 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =VideoToVideoSDPipeline
__UpperCamelCase =TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"} ) - {"image", "width", "height"}
__UpperCamelCase =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"} ) - {"image"}
__UpperCamelCase =PipelineTesterMixin.required_optional_params - {"latents"}
__UpperCamelCase =False
# No `output_type`.
__UpperCamelCase =frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(3_2, 6_4, 6_4, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=3_2 , attention_head_dim=4 , )
SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=5_1_2 , )
SCREAMING_SNAKE_CASE = CLIPTextModel(snake_case__ )
SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def UpperCamelCase ( self : Optional[int] , snake_case__ : List[str] , snake_case__ : Any=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 3, 3_2, 3_2) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
if str(snake_case__ ).startswith('mps' ):
SCREAMING_SNAKE_CASE = torch.manual_seed(snake_case__ )
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
SCREAMING_SNAKE_CASE = {
'prompt': 'A painting of a squirrel eating a burger',
'video': video,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = VideoToVideoSDPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 'np'
SCREAMING_SNAKE_CASE = sd_pipe(**snake_case__ ).frames
SCREAMING_SNAKE_CASE = frames[0][-3:, -3:, -1]
assert frames[0].shape == (3_2, 3_2, 3)
SCREAMING_SNAKE_CASE = np.array([1_0_6, 1_1_7, 1_1_3, 1_7_4, 1_3_7, 1_1_2, 1_4_8, 1_5_1, 1_3_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=snake_case__ , expected_max_diff=5E-3 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
pass
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
return super().test_progress_bar()
@slow
@skip_mps
class UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL' , torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
SCREAMING_SNAKE_CASE = torch.Generator(device='cpu' ).manual_seed(0 )
SCREAMING_SNAKE_CASE = torch.randn((1, 1_0, 3, 1_0_2_4, 5_7_6) , generator=snake_case__ )
SCREAMING_SNAKE_CASE = video.to('cuda' )
SCREAMING_SNAKE_CASE = 'Spiderman is surfing'
SCREAMING_SNAKE_CASE = pipe(snake_case__ , video=snake_case__ , generator=snake_case__ , num_inference_steps=3 , output_type='pt' ).frames
SCREAMING_SNAKE_CASE = np.array([-1.0_458_984, -1.1_279_297, -0.9_663_086, -0.91_503_906, -0.75_097_656] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
| 673 |
import random
def __lowerCAmelCase ( _UpperCamelCase : int , _UpperCamelCase : float , _UpperCamelCase : bool = False ) -> dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {i: [] for i in range(_UpperCamelCase )}
# if probability is greater or equal than 1, then generate a complete graph
if probability >= 1:
return complete_graph(_UpperCamelCase )
# if probability is lower or equal than 0, then return a graph without edges
if probability <= 0:
return graph
# for each couple of nodes, add an edge from u to v
# if the number randomly generated is greater than probability probability
for i in range(_UpperCamelCase ):
for j in range(i + 1 , _UpperCamelCase ):
if random.random() < probability:
graph[i].append(_UpperCamelCase )
if not directed:
# if the graph is undirected, add an edge in from j to i, either
graph[j].append(_UpperCamelCase )
return graph
def __lowerCAmelCase ( _UpperCamelCase : int ) -> dict:
'''simple docstring'''
return {
i: [j for j in range(_UpperCamelCase ) if i != j] for i in range(_UpperCamelCase )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 673 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ : Optional[int] = {
"""configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Dict = [
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 112 |
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : Union[str, Any] = len(snake_case_ )
_A : str = [[0] * n for i in range(snake_case_ )]
for i in range(snake_case_ ):
_A : Optional[Any] = y_points[i]
for i in range(2,snake_case_ ):
for j in range(snake_case_,snake_case_ ):
_A : List[Any] = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 307 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_UpperCAmelCase : Dict = {
"""configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Optional[int] = [
"""NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NezhaForNextSentencePrediction""",
"""NezhaForMaskedLM""",
"""NezhaForPreTraining""",
"""NezhaForMultipleChoice""",
"""NezhaForQuestionAnswering""",
"""NezhaForSequenceClassification""",
"""NezhaForTokenClassification""",
"""NezhaModel""",
"""NezhaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 713 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_UpperCAmelCase : str = {
"""configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""],
"""tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Union[str, Any] = [
"""GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXJapaneseForCausalLM""",
"""GPTNeoXJapaneseLayer""",
"""GPTNeoXJapaneseModel""",
"""GPTNeoXJapanesePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
_UpperCAmelCase : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 474 | 0 |
'''simple docstring'''
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : Optional[Any] , a__ : Dict ):
UpperCAmelCase = val
UpperCAmelCase = None
UpperCAmelCase = None
def __snake_case ( self : Tuple , a__ : List[Any] ):
if self.val:
if val < self.val:
if self.left is None:
UpperCAmelCase = Node(a__ )
else:
self.left.insert(a__ )
elif val > self.val:
if self.right is None:
UpperCAmelCase = Node(a__ )
else:
self.right.insert(a__ )
else:
UpperCAmelCase = val
def __snake_case ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Tuple ) -> Optional[Any]:
"""simple docstring"""
if root:
inorder(root.left , SCREAMING_SNAKE_CASE_ )
res.append(root.val )
inorder(root.right , SCREAMING_SNAKE_CASE_ )
def __snake_case ( SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return arr
UpperCAmelCase = Node(arr[0] )
for i in range(1 , len(SCREAMING_SNAKE_CASE_ ) ):
root.insert(arr[i] )
# Traverse BST in order.
UpperCAmelCase = []
inorder(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 51 |
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
def wrapper(*_UpperCAmelCase, **_UpperCAmelCase ):
lowerCAmelCase : str = timeit.default_timer()
lowerCAmelCase : str = func(*_UpperCAmelCase, **_UpperCAmelCase )
lowerCAmelCase : Optional[int] = timeit.default_timer() - starttime
return delta
lowerCAmelCase : Union[str, Any] = func.__name__
return wrapper
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase=100, _UpperCAmelCase=None ) -> Any:
'''simple docstring'''
lowerCAmelCase : Dict = []
lowerCAmelCase : Optional[int] = seq_shapes or {}
for i in range(_UpperCAmelCase ):
lowerCAmelCase : Any = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(_UpperCAmelCase, _ArrayXD ):
lowerCAmelCase : Dict = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(_UpperCAmelCase, datasets.Value ):
if v.dtype == "string":
lowerCAmelCase : Any = 'The small grey turtle was surprisingly fast when challenged.'
else:
lowerCAmelCase : Optional[Any] = np.random.randint(10, size=1 ).astype(v.dtype ).item()
elif isinstance(_UpperCAmelCase, datasets.Sequence ):
while isinstance(_UpperCAmelCase, datasets.Sequence ):
lowerCAmelCase : int = v.feature
lowerCAmelCase : Optional[int] = seq_shapes[k]
lowerCAmelCase : str = np.random.rand(*_UpperCAmelCase ).astype(v.dtype )
lowerCAmelCase : Any = data
dummy_data.append((i, example) )
return dummy_data
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=100, _UpperCAmelCase=None ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase : Any = generate_examples(_UpperCAmelCase, num_examples=_UpperCAmelCase, seq_shapes=_UpperCAmelCase )
with ArrowWriter(features=_UpperCAmelCase, path=_UpperCAmelCase ) as writer:
for key, record in dummy_data:
lowerCAmelCase : Any = features.encode_example(_UpperCAmelCase )
writer.write(_UpperCAmelCase )
lowerCAmelCase , lowerCAmelCase : Optional[int] = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}." )
lowerCAmelCase : int = datasets.Dataset.from_file(filename=_UpperCAmelCase, info=datasets.DatasetInfo(features=_UpperCAmelCase ) )
return dataset
| 343 | 0 |
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
SCREAMING_SNAKE_CASE : Optional[Any] = TypeVar("KEY")
SCREAMING_SNAKE_CASE : int = TypeVar("VAL")
@dataclass(frozen=_UpperCamelCase, slots=_UpperCamelCase )
class snake_case ( Generic[KEY, VAL] ):
"""simple docstring"""
_a = 42
_a = 42
class snake_case ( _Item ):
"""simple docstring"""
def __init__( self ) -> None:
super().__init__(__a, __a )
def __bool__( self ) -> bool:
return False
SCREAMING_SNAKE_CASE : Optional[int] = _DeletedItem()
class snake_case ( MutableMapping[KEY, VAL] ):
"""simple docstring"""
def __init__( self, _lowercase = 8, _lowercase = 0.75 ) -> None:
SCREAMING_SNAKE_CASE_ = initial_block_size
SCREAMING_SNAKE_CASE_ = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
SCREAMING_SNAKE_CASE_ = capacity_factor
SCREAMING_SNAKE_CASE_ = 0
def a__ ( self, _lowercase ) -> int:
return hash(__a ) % len(self._buckets )
def a__ ( self, _lowercase ) -> int:
return (ind + 1) % len(self._buckets )
def a__ ( self, _lowercase, _lowercase, _lowercase ) -> bool:
SCREAMING_SNAKE_CASE_ = self._buckets[ind]
if not stored:
SCREAMING_SNAKE_CASE_ = _Item(__a, __a )
self._len += 1
return True
elif stored.key == key:
SCREAMING_SNAKE_CASE_ = _Item(__a, __a )
return True
else:
return False
def a__ ( self ) -> bool:
SCREAMING_SNAKE_CASE_ = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(__a )
def a__ ( self ) -> bool:
if len(self._buckets ) <= self._initial_block_size:
return False
SCREAMING_SNAKE_CASE_ = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def a__ ( self, _lowercase ) -> None:
SCREAMING_SNAKE_CASE_ = self._buckets
SCREAMING_SNAKE_CASE_ = [None] * new_size
SCREAMING_SNAKE_CASE_ = 0
for item in old_buckets:
if item:
self._add_item(item.key, item.val )
def a__ ( self ) -> None:
self._resize(len(self._buckets ) * 2 )
def a__ ( self ) -> None:
self._resize(len(self._buckets ) // 2 )
def a__ ( self, _lowercase ) -> Iterator[int]:
SCREAMING_SNAKE_CASE_ = self._get_bucket_index(__a )
for _ in range(len(self._buckets ) ):
yield ind
SCREAMING_SNAKE_CASE_ = self._get_next_ind(__a )
def a__ ( self, _lowercase, _lowercase ) -> None:
for ind in self._iterate_buckets(__a ):
if self._try_set(__a, __a, __a ):
break
def __setitem__( self, _lowercase, _lowercase ) -> None:
if self._is_full():
self._size_up()
self._add_item(__a, __a )
def __delitem__( self, _lowercase ) -> None:
for ind in self._iterate_buckets(__a ):
SCREAMING_SNAKE_CASE_ = self._buckets[ind]
if item is None:
raise KeyError(__a )
if item is _deleted:
continue
if item.key == key:
SCREAMING_SNAKE_CASE_ = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self, _lowercase ) -> VAL:
for ind in self._iterate_buckets(__a ):
SCREAMING_SNAKE_CASE_ = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(__a )
def __len__( self ) -> int:
return self._len
def __iter__( self ) -> Iterator[KEY]:
yield from (item.key for item in self._buckets if item)
def __repr__( self ) -> str:
SCREAMING_SNAKE_CASE_ = " ,".join(
f"""{item.key}: {item.val}""" for item in self._buckets if item )
return f"""HashMap({val_string})"""
| 707 |
'''simple docstring'''
def _UpperCamelCase ( lowerCAmelCase__: int ,lowerCAmelCase__: float ,lowerCAmelCase__: float ) -> float:
return round(float(moles / volume ) * nfactor )
def _UpperCamelCase ( lowerCAmelCase__: float ,lowerCAmelCase__: float ,lowerCAmelCase__: float ) -> float:
return round(float((moles * 0.0_821 * temperature) / (volume) ) )
def _UpperCamelCase ( lowerCAmelCase__: float ,lowerCAmelCase__: float ,lowerCAmelCase__: float ) -> float:
return round(float((moles * 0.0_821 * temperature) / (pressure) ) )
def _UpperCamelCase ( lowerCAmelCase__: float ,lowerCAmelCase__: float ,lowerCAmelCase__: float ) -> float:
return round(float((pressure * volume) / (0.0_821 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 238 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : str = logging.get_logger(__name__)
_lowerCamelCase : Optional[Any] = {
'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class snake_case__ ( __snake_case ):
'''simple docstring'''
__A = '''xmod'''
def __init__( self : List[str] , lowerCAmelCase_ : Any=3_05_22 , lowerCAmelCase_ : Optional[Any]=7_68 , lowerCAmelCase_ : Union[str, Any]=12 , lowerCAmelCase_ : int=12 , lowerCAmelCase_ : int=30_72 , lowerCAmelCase_ : Optional[Any]="gelu" , lowerCAmelCase_ : Tuple=0.1 , lowerCAmelCase_ : List[str]=0.1 , lowerCAmelCase_ : Optional[Any]=5_12 , lowerCAmelCase_ : int=2 , lowerCAmelCase_ : Dict=0.02 , lowerCAmelCase_ : str=1e-12 , lowerCAmelCase_ : int=1 , lowerCAmelCase_ : Union[str, Any]=0 , lowerCAmelCase_ : List[str]=2 , lowerCAmelCase_ : Optional[Any]="absolute" , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : str=False , lowerCAmelCase_ : Any=2 , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : Optional[int]=("en_XX",) , lowerCAmelCase_ : Tuple=None , **lowerCAmelCase_ : List[Any] , ) -> Optional[int]:
super().__init__(pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ )
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = position_embedding_type
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = classifier_dropout
UpperCAmelCase_ = pre_norm
UpperCAmelCase_ = adapter_reduction_factor
UpperCAmelCase_ = adapter_layer_norm
UpperCAmelCase_ = adapter_reuse_layer_norm
UpperCAmelCase_ = ln_before_adapter
UpperCAmelCase_ = list(lowerCAmelCase_ )
UpperCAmelCase_ = default_language
class snake_case__ ( __snake_case ):
'''simple docstring'''
@property
def UpperCamelCase ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
UpperCAmelCase_ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCAmelCase_ = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 121 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class snake_case__ :
'''simple docstring'''
__A = 42
__A = None
__A = None
_lowerCamelCase : str = namedtuple('CoinsDistribResult', 'moves excess')
def _lowerCAmelCase ( __magic_name__ :TreeNode | None ):
if root is None:
return 0
# Validation
def count_nodes(__magic_name__ :TreeNode | None ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(__magic_name__ :TreeNode | None ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(__magic_name__ ) != count_coins(__magic_name__ ):
raise ValueError('''The nodes number should be same as the number of coins''' )
# Main calculation
def get_distrib(__magic_name__ :TreeNode | None ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
UpperCAmelCase_, UpperCAmelCase_ = get_distrib(node.left )
UpperCAmelCase_, UpperCAmelCase_ = get_distrib(node.right )
UpperCAmelCase_ = 1 - left_distrib_excess
UpperCAmelCase_ = 1 - right_distrib_excess
UpperCAmelCase_ = (
left_distrib_moves
+ right_distrib_moves
+ abs(__magic_name__ )
+ abs(__magic_name__ )
)
UpperCAmelCase_ = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(__magic_name__ , __magic_name__ )
return get_distrib(__magic_name__ )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 121 | 1 |
"""simple docstring"""
from itertools import permutations
def A__ ( UpperCamelCase ):
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
A = [7, 11, 13, 17]
for i, test in enumerate(UpperCamelCase ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def A__ ( UpperCamelCase = 10 ):
return sum(
int("".join(map(UpperCamelCase , UpperCamelCase ) ) )
for num in permutations(range(UpperCamelCase ) )
if is_substring_divisible(UpperCamelCase ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 709 |
"""simple docstring"""
from pathlib import Path
import numpy as np
from PIL import Image
def A__ ( UpperCamelCase ):
A, A, A = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
return 0.29_89 * r + 0.58_70 * g + 0.11_40 * b
def A__ ( UpperCamelCase ):
return (gray > 127) & (gray <= 255)
def A__ ( UpperCamelCase , UpperCamelCase ):
A = np.zeros_like(UpperCamelCase )
A = np.zeros(
(image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
# Copy image to padded image
A = image
# Iterate over image & apply kernel
for x in range(image.shape[1] ):
for y in range(image.shape[0] ):
A = (
kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
).sum()
A = int(summation > 0 )
return output
if __name__ == "__main__":
# read original image
_snake_case : str = Path(__file__).resolve().parent / 'image_data' / 'lena.jpg'
_snake_case : Any = np.array(Image.open(lena_path))
# kernel to be applied
_snake_case : Tuple = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
_snake_case : int = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
_snake_case : List[Any] = Image.fromarray(output).convert('RGB')
pil_img.save('result_dilation.png')
| 524 | 0 |
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
lowerCAmelCase : Optional[Any] = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class _A ( nn.Module):
def __init__( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE_ : str = torchvision.models.resnetaaa(pretrained=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : int = list(model.children() )[:-2]
SCREAMING_SNAKE_CASE_ : Optional[Any] = nn.Sequential(*__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : str = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = self.pool(self.model(__lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ : int = torch.flatten(__lowerCAmelCase , start_dim=2 )
SCREAMING_SNAKE_CASE_ : List[Any] = out.transpose(1 , 2 ).contiguous()
return out # BxNx2048
class _A ( a__):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = [json.loads(__lowerCAmelCase ) for l in open(__lowerCAmelCase )]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = os.path.dirname(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : str = tokenizer
SCREAMING_SNAKE_CASE_ : str = labels
SCREAMING_SNAKE_CASE_ : Dict = len(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Any = max_seq_length
SCREAMING_SNAKE_CASE_ : Union[str, Any] = transforms
def __len__( self ):
"""simple docstring"""
return len(self.data )
def __getitem__( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = torch.LongTensor(self.tokenizer.encode(self.data[index]['text'] , add_special_tokens=__lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ : Optional[Any] = sentence[0], sentence[1:-1], sentence[-1]
SCREAMING_SNAKE_CASE_ : Tuple = sentence[: self.max_seq_length]
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.zeros(self.n_classes )
SCREAMING_SNAKE_CASE_ : str = 1
SCREAMING_SNAKE_CASE_ : Optional[int] = Image.open(os.path.join(self.data_dir , self.data[index]['img'] ) ).convert('RGB' )
SCREAMING_SNAKE_CASE_ : List[str] = self.transforms(__lowerCAmelCase )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = Counter()
for row in self.data:
label_freqs.update(row['label'] )
return label_freqs
def A_ ( a ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = [len(row['sentence'] ) for row in batch]
SCREAMING_SNAKE_CASE_ : Optional[Any] = len(a ), max(a )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.zeros(a , a , dtype=torch.long )
SCREAMING_SNAKE_CASE_ : Any = torch.zeros(a , a , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(a , a ) ):
SCREAMING_SNAKE_CASE_ : int = input_row['''sentence''']
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 1
SCREAMING_SNAKE_CASE_ : str = torch.stack([row['image'] for row in batch] )
SCREAMING_SNAKE_CASE_ : Dict = torch.stack([row['label'] for row in batch] )
SCREAMING_SNAKE_CASE_ : Dict = torch.stack([row['image_start_token'] for row in batch] )
SCREAMING_SNAKE_CASE_ : int = torch.stack([row['image_end_token'] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def A_ ( ):
"""simple docstring"""
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def A_ ( ):
"""simple docstring"""
return transforms.Compose(
[
transforms.Resize(2_5_6 ),
transforms.CenterCrop(2_2_4 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_77_70_44, 0.44_53_14_29, 0.40_66_10_17] , std=[0.12_22_19_94, 0.12_14_58_35, 0.14_38_04_69] , ),
] )
| 511 | '''simple docstring'''
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=False ):
try:
lowercase__ : int = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
lowercase__ : Optional[Any] = default
else:
# KEY is set, convert it to True or False.
try:
lowercase__ : List[Any] = strtobool(UpperCAmelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"""If set, {key} must be yes or no.""" )
return _value
__a: Dict = parse_flag_from_env("""RUN_SLOW""", default=False)
def __UpperCamelCase ( UpperCAmelCase ):
return unittest.skip('''Test was skipped''' )(UpperCAmelCase )
def __UpperCamelCase ( UpperCAmelCase ):
return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(UpperCAmelCase )
def __UpperCamelCase ( UpperCAmelCase ):
return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(UpperCAmelCase )
def __UpperCamelCase ( UpperCAmelCase ):
return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(UpperCAmelCase )
def __UpperCamelCase ( UpperCAmelCase ):
return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(UpperCAmelCase )
def __UpperCamelCase ( UpperCAmelCase ):
return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(UpperCAmelCase )
def __UpperCamelCase ( UpperCAmelCase ):
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(UpperCAmelCase )
def __UpperCamelCase ( UpperCAmelCase ):
return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(UpperCAmelCase )
def __UpperCamelCase ( UpperCAmelCase ):
return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(UpperCAmelCase )
def __UpperCamelCase ( UpperCAmelCase ):
return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(UpperCAmelCase )
def __UpperCamelCase ( UpperCAmelCase ):
return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(UpperCAmelCase )
def __UpperCamelCase ( UpperCAmelCase ):
return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(UpperCAmelCase )
def __UpperCamelCase ( UpperCAmelCase ):
return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(UpperCAmelCase )
def __UpperCamelCase ( UpperCAmelCase ):
return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(UpperCAmelCase )
def __UpperCamelCase ( UpperCAmelCase ):
return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(UpperCAmelCase )
def __UpperCamelCase ( UpperCAmelCase ):
return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(UpperCAmelCase )
def __UpperCamelCase ( UpperCAmelCase=None , UpperCAmelCase=None ):
if test_case is None:
return partial(UpperCAmelCase , version=UpperCAmelCase )
return unittest.skipUnless(is_torch_version('''>=''' , UpperCAmelCase ) , F"""test requires torch version >= {version}""" )(UpperCAmelCase )
def __UpperCamelCase ( UpperCAmelCase ):
return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(UpperCAmelCase )
def __UpperCamelCase ( UpperCAmelCase ):
return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(UpperCAmelCase )
def __UpperCamelCase ( UpperCAmelCase ):
return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(UpperCAmelCase )
__a: Tuple = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def __UpperCamelCase ( UpperCAmelCase ):
return unittest.skipUnless(
_atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(UpperCAmelCase )
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = True
@classmethod
def _lowerCAmelCase( cls ) -> List[Any]:
lowercase__ : Tuple = tempfile.mkdtemp()
@classmethod
def _lowerCAmelCase( cls ) -> Union[str, Any]:
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def _lowerCAmelCase( self ) -> List[str]:
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob('''**/*''' ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(__lowerCAmelCase )
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase( self ) -> int:
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase( self , __lowerCAmelCase ) -> Optional[int]:
lowercase__ : Union[str, Any] = mocks if isinstance(__lowerCAmelCase , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def __UpperCamelCase ( UpperCAmelCase ):
lowercase__ : Any = AcceleratorState()
lowercase__ : List[str] = tensor[None].clone().to(state.device )
lowercase__ : str = gather(UpperCAmelCase ).cpu()
lowercase__ : List[Any] = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , UpperCAmelCase ):
return False
return True
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> str:
lowercase__ : Union[str, Any] = returncode
lowercase__ : Optional[Any] = stdout
lowercase__ : str = stderr
async def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ):
while True:
lowercase__ : int = await stream.readline()
if line:
callback(UpperCAmelCase )
else:
break
async def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=False , UpperCAmelCase=False ):
if echo:
print('''\nRunning: ''' , ''' '''.join(UpperCAmelCase ) )
lowercase__ : List[Any] = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=UpperCAmelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=UpperCAmelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
lowercase__ : List[Any] = []
lowercase__ : Dict = []
def tee(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase="" ):
lowercase__ : int = line.decode('''utf-8''' ).rstrip()
sink.append(UpperCAmelCase )
if not quiet:
print(UpperCAmelCase , UpperCAmelCase , file=UpperCAmelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda UpperCAmelCase : tee(UpperCAmelCase , UpperCAmelCase , sys.stdout , label='''stdout:''' ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda UpperCAmelCase : tee(UpperCAmelCase , UpperCAmelCase , sys.stderr , label='''stderr:''' ) ) ),
] , timeout=UpperCAmelCase , )
return _RunOutput(await p.wait() , UpperCAmelCase , UpperCAmelCase )
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=180 , UpperCAmelCase=False , UpperCAmelCase=True ):
lowercase__ : Dict = asyncio.get_event_loop()
lowercase__ : Dict = loop.run_until_complete(
_stream_subprocess(UpperCAmelCase , env=UpperCAmelCase , stdin=UpperCAmelCase , timeout=UpperCAmelCase , quiet=UpperCAmelCase , echo=UpperCAmelCase ) )
lowercase__ : Optional[Any] = ''' '''.join(UpperCAmelCase )
if result.returncode > 0:
lowercase__ : Tuple = '''\n'''.join(result.stderr )
raise RuntimeError(
F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
F"""The combined stderr from workers follows:\n{stderr}""" )
return result
class UpperCAmelCase ( a__ ):
'''simple docstring'''
pass
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=False ):
try:
lowercase__ : Optional[Any] = subprocess.check_output(UpperCAmelCase , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(UpperCAmelCase , '''decode''' ):
lowercase__ : Tuple = output.decode('''utf-8''' )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
F"""Command `{' '.join(UpperCAmelCase )}` failed with the following error:\n\n{e.output.decode()}""" ) from e
| 152 | 0 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class lowercase__:
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=24 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=2 , ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ : int =parent
UpperCamelCase__ : int =batch_size
UpperCamelCase__ : Optional[int] =patch_size
UpperCamelCase__ : str =max_length
UpperCamelCase__ : Dict =num_mel_bins
UpperCamelCase__ : Union[str, Any] =is_training
UpperCamelCase__ : Optional[int] =use_labels
UpperCamelCase__ : List[str] =hidden_size
UpperCamelCase__ : List[str] =num_hidden_layers
UpperCamelCase__ : str =num_attention_heads
UpperCamelCase__ : Dict =intermediate_size
UpperCamelCase__ : str =hidden_act
UpperCamelCase__ : int =hidden_dropout_prob
UpperCamelCase__ : str =attention_probs_dropout_prob
UpperCamelCase__ : str =type_sequence_label_size
UpperCamelCase__ : str =initializer_range
UpperCamelCase__ : Any =scope
UpperCamelCase__ : str =frequency_stride
UpperCamelCase__ : str =time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
UpperCamelCase__ : Any =(self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
UpperCamelCase__ : Optional[Any] =(self.max_length - self.patch_size) // self.time_stride + 1
UpperCamelCase__ : Optional[int] =frequency_out_dimension * time_out_dimension
UpperCamelCase__ : Union[str, Any] =num_patches + 2
def UpperCAmelCase ( self) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] =floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])
UpperCamelCase__ : Optional[int] =None
if self.use_labels:
UpperCamelCase__ : Tuple =ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCamelCase__ : Dict =self.get_config()
return config, input_values, labels
def UpperCAmelCase ( self) -> Union[str, Any]:
"""simple docstring"""
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) -> int:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] =ASTModel(config=__SCREAMING_SNAKE_CASE)
model.to(__SCREAMING_SNAKE_CASE)
model.eval()
UpperCamelCase__ : str =model(__SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase ( self) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : List[Any] =self.prepare_config_and_inputs()
(
(
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) ,
) : Optional[int] =config_and_inputs
UpperCamelCase__ : Dict ={"input_values": input_values}
return config, inputs_dict
@require_torch
class lowercase__( snake_case__ , snake_case__ , unittest.TestCase ):
'''simple docstring'''
snake_case__ = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
snake_case__ = (
{'''audio-classification''': ASTForAudioClassification, '''feature-extraction''': ASTModel}
if is_torch_available()
else {}
)
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) -> Optional[Any]:
"""simple docstring"""
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def UpperCAmelCase ( self) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ : Optional[int] =ASTModelTester(self)
UpperCamelCase__ : Dict =ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=37)
def UpperCAmelCase ( self) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="AST does not use inputs_embeds")
def UpperCAmelCase ( self) -> Optional[int]:
"""simple docstring"""
pass
def UpperCAmelCase ( self) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ , UpperCamelCase__ : Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : List[str] =model_class(__SCREAMING_SNAKE_CASE)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
UpperCamelCase__ : int =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear))
def UpperCAmelCase ( self) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ , UpperCamelCase__ : Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Optional[Any] =model_class(__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : Dict =inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ : int =[*signature.parameters.keys()]
UpperCamelCase__ : int =["input_values"]
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE)
def UpperCAmelCase ( self) -> Dict:
"""simple docstring"""
UpperCamelCase__ : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE)
@slow
def UpperCAmelCase ( self) -> Dict:
"""simple docstring"""
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ : Dict =ASTModel.from_pretrained(__SCREAMING_SNAKE_CASE)
self.assertIsNotNone(__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ : Optional[Any] =hf_hub_download(
repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" )
UpperCamelCase__ , UpperCamelCase__ : Any =torchaudio.load(A_ )
return audio, sampling_rate
@require_torch
@require_torchaudio
class lowercase__( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase ( self) -> List[str]:
"""simple docstring"""
return (
ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
if is_torchaudio_available()
else None
)
@slow
def UpperCAmelCase ( self) -> Tuple:
"""simple docstring"""
UpperCamelCase__ : List[str] =self.default_feature_extractor
UpperCamelCase__ : Union[str, Any] =ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : Optional[Any] =self.default_feature_extractor
UpperCamelCase__ , UpperCamelCase__ : Tuple =prepare_audio()
UpperCamelCase__ : int =audio.squeeze().numpy()
UpperCamelCase__ : Tuple =feature_extractor(__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , return_tensors="pt").to(__SCREAMING_SNAKE_CASE)
# forward pass
with torch.no_grad():
UpperCamelCase__ : Tuple =model(**__SCREAMING_SNAKE_CASE)
# verify the logits
UpperCamelCase__ : List[Any] =torch.Size((1, 5_27))
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE)
UpperCamelCase__ : Tuple =torch.tensor([-0.87_60, -7.00_42, -8.66_02]).to(__SCREAMING_SNAKE_CASE)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4))
| 582 |
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase__( snake_case__ , unittest.TestCase ):
'''simple docstring'''
snake_case__ = OpenAIGPTTokenizer
snake_case__ = OpenAIGPTTokenizerFast
snake_case__ = True
snake_case__ = False
def UpperCAmelCase ( self) -> List[Any]:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCamelCase__ : Dict =[
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
UpperCamelCase__ : List[Any] =dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE))))
UpperCamelCase__ : Tuple =["#version: 0.2", "l o", "lo w", "e r</w>", ""]
UpperCamelCase__ : Any =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
UpperCamelCase__ : int =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file , "w") as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE))
with open(self.merges_file , "w") as fp:
fp.write("\n".join(__SCREAMING_SNAKE_CASE))
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE) -> Any:
"""simple docstring"""
return "lower newer", "lower newer"
def UpperCAmelCase ( self) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ : Any =OpenAIGPTTokenizer(self.vocab_file , self.merges_file)
UpperCamelCase__ : Dict ="lower"
UpperCamelCase__ : Union[str, Any] =["low", "er</w>"]
UpperCamelCase__ : Any =tokenizer.tokenize(__SCREAMING_SNAKE_CASE)
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
UpperCamelCase__ : Tuple =tokens + ["<unk>"]
UpperCamelCase__ : Union[str, Any] =[14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE=15) -> str:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
UpperCamelCase__ : str =self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
# Simple input
UpperCamelCase__ : List[str] ="This is a simple input"
UpperCamelCase__ : Dict =["This is a simple input 1", "This is a simple input 2"]
UpperCamelCase__ : Union[str, Any] =("This is a simple input", "This is a pair")
UpperCamelCase__ : Optional[Any] =[
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(__SCREAMING_SNAKE_CASE , tokenizer_r.encode , __SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding="max_length")
# Simple input
self.assertRaises(__SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , __SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding="max_length")
# Simple input
self.assertRaises(
__SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , __SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding="max_length" , )
# Pair input
self.assertRaises(__SCREAMING_SNAKE_CASE , tokenizer_r.encode , __SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding="max_length")
# Pair input
self.assertRaises(__SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , __SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding="max_length")
# Pair input
self.assertRaises(
__SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , __SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding="max_length" , )
def UpperCAmelCase ( self) -> List[Any]:
"""simple docstring"""
pass
@require_ftfy
@require_spacy
@require_tokenizers
class lowercase__( snake_case__ ):
'''simple docstring'''
pass
| 582 | 1 |
'''simple docstring'''
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : Any ) -> Optional[Any]:
"""simple docstring"""
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : dict[int, list[int]] ) -> list[tuple[int, int]]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = 0
SCREAMING_SNAKE_CASE_ : Optional[Any] = len(SCREAMING_SNAKE_CASE_ ) # No of vertices in graph
SCREAMING_SNAKE_CASE_ : List[Any] = [0] * n
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [False] * n
def dfs(SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] ):
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : Tuple = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , id_ )
SCREAMING_SNAKE_CASE_ : Any = min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
SCREAMING_SNAKE_CASE_ : Optional[Any] = min(low[at] , low[to] )
SCREAMING_SNAKE_CASE_ : list[tuple[int, int]] = []
for i in range(SCREAMING_SNAKE_CASE_ ):
if not visited[i]:
dfs(SCREAMING_SNAKE_CASE_ , -1 , SCREAMING_SNAKE_CASE_ , id_ )
return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
| 421 |
'''simple docstring'''
import re
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : str ) -> bool:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = re.compile(R"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$" )
if match := re.search(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return match.string == phone
return False
if __name__ == "__main__":
print(indian_phone_validator('+918827897895'))
| 421 | 1 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class _snake_case (__SCREAMING_SNAKE_CASE):
__A : Any ="SpeechT5FeatureExtractor"
__A : List[str] ="SpeechT5Tokenizer"
def __init__( self ,_snake_case ,_snake_case ):
super().__init__(_snake_case ,_snake_case )
def __call__( self ,*_snake_case ,**_snake_case ):
UpperCAmelCase_ : Any = kwargs.pop("audio" ,_snake_case )
UpperCAmelCase_ : Any = kwargs.pop("text" ,_snake_case )
UpperCAmelCase_ : int = kwargs.pop("text_target" ,_snake_case )
UpperCAmelCase_ : Tuple = kwargs.pop("audio_target" ,_snake_case )
UpperCAmelCase_ : int = kwargs.pop("sampling_rate" ,_snake_case )
if audio is not None and text is not None:
raise ValueError(
"Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?" )
if audio_target is not None and text_target is not None:
raise ValueError(
"Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?" )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
"You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process." )
if audio is not None:
UpperCAmelCase_ : int = self.feature_extractor(_snake_case ,*_snake_case ,sampling_rate=_snake_case ,**_snake_case )
elif text is not None:
UpperCAmelCase_ : Optional[int] = self.tokenizer(_snake_case ,**_snake_case )
else:
UpperCAmelCase_ : int = None
if audio_target is not None:
UpperCAmelCase_ : Tuple = self.feature_extractor(audio_target=_snake_case ,*_snake_case ,sampling_rate=_snake_case ,**_snake_case )
UpperCAmelCase_ : int = targets["input_values"]
elif text_target is not None:
UpperCAmelCase_ : Union[str, Any] = self.tokenizer(_snake_case ,**_snake_case )
UpperCAmelCase_ : Tuple = targets["input_ids"]
else:
UpperCAmelCase_ : Tuple = None
if inputs is None:
return targets
if targets is not None:
UpperCAmelCase_ : Optional[int] = labels
UpperCAmelCase_ : List[Any] = targets.get("attention_mask" )
if decoder_attention_mask is not None:
UpperCAmelCase_ : List[Any] = decoder_attention_mask
return inputs
def UpperCamelCase__ ( self ,*_snake_case ,**_snake_case ):
UpperCAmelCase_ : List[Any] = kwargs.pop("input_values" ,_snake_case )
UpperCAmelCase_ : List[str] = kwargs.pop("input_ids" ,_snake_case )
UpperCAmelCase_ : List[Any] = kwargs.pop("labels" ,_snake_case )
if input_values is not None and input_ids is not None:
raise ValueError("Cannot process both `input_values` and `input_ids` inputs." )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
"You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded." )
if input_values is not None:
UpperCAmelCase_ : str = self.feature_extractor.pad(_snake_case ,*_snake_case ,**_snake_case )
elif input_ids is not None:
UpperCAmelCase_ : Tuple = self.tokenizer.pad(_snake_case ,**_snake_case )
else:
UpperCAmelCase_ : List[str] = None
if labels is not None:
if "input_ids" in labels or (isinstance(_snake_case ,_snake_case ) and "input_ids" in labels[0]):
UpperCAmelCase_ : int = self.tokenizer.pad(_snake_case ,**_snake_case )
UpperCAmelCase_ : Optional[int] = targets["input_ids"]
else:
UpperCAmelCase_ : Dict = self.feature_extractor.feature_size
UpperCAmelCase_ : List[Any] = self.feature_extractor.num_mel_bins
UpperCAmelCase_ : int = self.feature_extractor.pad(_snake_case ,*_snake_case ,**_snake_case )
UpperCAmelCase_ : Union[str, Any] = feature_size_hack
UpperCAmelCase_ : Union[str, Any] = targets["input_values"]
else:
UpperCAmelCase_ : Tuple = None
if inputs is None:
return targets
if targets is not None:
UpperCAmelCase_ : Dict = labels
UpperCAmelCase_ : List[str] = targets.get("attention_mask" )
if decoder_attention_mask is not None:
UpperCAmelCase_ : Union[str, Any] = decoder_attention_mask
return inputs
def UpperCamelCase__ ( self ,*_snake_case ,**_snake_case ):
return self.tokenizer.batch_decode(*_snake_case ,**_snake_case )
def UpperCamelCase__ ( self ,*_snake_case ,**_snake_case ):
return self.tokenizer.decode(*_snake_case ,**_snake_case )
| 323 |
'''simple docstring'''
import csv
import tweepy
# Twitter API credentials
_lowerCamelCase = """"""
_lowerCamelCase = """"""
_lowerCamelCase = """"""
_lowerCamelCase = """"""
def a__ ( _SCREAMING_SNAKE_CASE : str ) -> None:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = tweepy.OAuthHandler(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
auth.set_access_token(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = tweepy.API(_SCREAMING_SNAKE_CASE )
# initialize a list to hold all the tweepy Tweets
UpperCAmelCase_ : str = []
# make initial request for most recent tweets (200 is the maximum allowed count)
UpperCAmelCase_ : Dict = api.user_timeline(screen_name=_SCREAMING_SNAKE_CASE , count=2_00 )
# save most recent tweets
alltweets.extend(_SCREAMING_SNAKE_CASE )
# save the id of the oldest tweet less one
UpperCAmelCase_ : Any = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(_SCREAMING_SNAKE_CASE ) > 0:
print(F'''getting tweets before {oldest}''' )
# all subsequent requests use the max_id param to prevent duplicates
UpperCAmelCase_ : Tuple = api.user_timeline(
screen_name=_SCREAMING_SNAKE_CASE , count=2_00 , max_id=_SCREAMING_SNAKE_CASE )
# save most recent tweets
alltweets.extend(_SCREAMING_SNAKE_CASE )
# update the id of the oldest tweet less one
UpperCAmelCase_ : Optional[int] = alltweets[-1].id - 1
print(F'''...{len(_SCREAMING_SNAKE_CASE )} tweets downloaded so far''' )
# transform the tweepy tweets into a 2D array that will populate the csv
UpperCAmelCase_ : str = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F'''new_{screen_name}_tweets.csv''' , "w" ) as f:
UpperCAmelCase_ : str = csv.writer(_SCREAMING_SNAKE_CASE )
writer.writerow(["id", "created_at", "text"] )
writer.writerows(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 323 | 1 |
"""simple docstring"""
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
a :List[Any] = False, False, False
@dataclass
class __a :
'''simple docstring'''
_SCREAMING_SNAKE_CASE :int = None
_SCREAMING_SNAKE_CASE :List[Any] = True
_SCREAMING_SNAKE_CASE :int = True
_SCREAMING_SNAKE_CASE :int = None
# Automatically constructed
_SCREAMING_SNAKE_CASE :Optional[Any] = """dict"""
_SCREAMING_SNAKE_CASE :Optional[int] = pa.struct({"""bytes""": pa.binary(), """path""": pa.string()})
_SCREAMING_SNAKE_CASE :Optional[int] = field(default="""Audio""" , init=snake_case_ , repr=snake_case_)
def __call__( self ) -> List[str]:
"""simple docstring"""
return self.pa_type
def _a ( self , _a ) -> Tuple:
"""simple docstring"""
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("""To support encoding audio data, please install \'soundfile\'.""" ) from err
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
return {"bytes": None, "path": value}
elif isinstance(lowerCamelCase__ , lowerCamelCase__ ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
SCREAMING_SNAKE_CASE__ : Dict = BytesIO()
sf.write(lowerCamelCase__ , value["""array"""] , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("""pcm""" ):
# "PCM" only has raw audio bytes
if value.get("""sampling_rate""" ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("""To use PCM files, please specify a \'sampling_rate\' in Audio object""" )
if value.get("""bytes""" ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
SCREAMING_SNAKE_CASE__ : Any = np.frombuffer(value["""bytes"""] , dtype=np.intaa ).astype(np.floataa ) / 32_767
else:
SCREAMING_SNAKE_CASE__ : str = np.memmap(value["""path"""] , dtype="""h""" , mode="""r""" ).astype(np.floataa ) / 32_767
SCREAMING_SNAKE_CASE__ : Optional[int] = BytesIO(bytes() )
sf.write(lowerCamelCase__ , lowerCamelCase__ , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
f'''An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def _a ( self , _a , _a = None ) -> Union[str, Any]:
"""simple docstring"""
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Audio(decode=True) instead.""" )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = (value["""path"""], BytesIO(value["""bytes"""] )) if value["""bytes"""] is not None else (value["""path"""], None)
if path is None and file is None:
raise ValueError(f'''An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("""To support decoding audio files, please install \'librosa\' and \'soundfile\'.""" ) from err
SCREAMING_SNAKE_CASE__ : int = xsplitext(lowerCamelCase__ )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"""Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"""Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
if file is None:
SCREAMING_SNAKE_CASE__ : Dict = token_per_repo_id or {}
SCREAMING_SNAKE_CASE__ : List[str] = path.split("""::""" )[-1]
try:
SCREAMING_SNAKE_CASE__ : List[str] = string_to_dict(lowerCamelCase__ , config.HUB_DATASETS_URL )["""repo_id"""]
SCREAMING_SNAKE_CASE__ : int = token_per_repo_id[repo_id]
except (ValueError, KeyError):
SCREAMING_SNAKE_CASE__ : Optional[int] = None
with xopen(lowerCamelCase__ , """rb""" , use_auth_token=lowerCamelCase__ ) as f:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = sf.read(lowerCamelCase__ )
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = sf.read(lowerCamelCase__ )
SCREAMING_SNAKE_CASE__ : int = array.T
if self.mono:
SCREAMING_SNAKE_CASE__ : Any = librosa.to_mono(lowerCamelCase__ )
if self.sampling_rate and self.sampling_rate != sampling_rate:
SCREAMING_SNAKE_CASE__ : Dict = librosa.resample(lowerCamelCase__ , orig_sr=lowerCamelCase__ , target_sr=self.sampling_rate )
SCREAMING_SNAKE_CASE__ : str = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def _a ( self ) -> List[str]:
"""simple docstring"""
from .features import Value
if self.decode:
raise ValueError("""Cannot flatten a decoded Audio feature.""" )
return {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
def _a ( self , _a ) -> Optional[int]:
"""simple docstring"""
if pa.types.is_string(storage.type ):
SCREAMING_SNAKE_CASE__ : Any = pa.array([None] * len(lowerCamelCase__ ) , type=pa.binary() )
SCREAMING_SNAKE_CASE__ : Tuple = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
SCREAMING_SNAKE_CASE__ : Any = pa.array([None] * len(lowerCamelCase__ ) , type=pa.string() )
SCREAMING_SNAKE_CASE__ : str = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("""array""" ):
SCREAMING_SNAKE_CASE__ : int = pa.array([Audio().encode_example(lowerCamelCase__ ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
SCREAMING_SNAKE_CASE__ : Optional[int] = storage.field("""bytes""" )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = pa.array([None] * len(lowerCamelCase__ ) , type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
SCREAMING_SNAKE_CASE__ : Optional[Any] = storage.field("""path""" )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = pa.array([None] * len(lowerCamelCase__ ) , type=pa.string() )
SCREAMING_SNAKE_CASE__ : List[Any] = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
return array_cast(lowerCamelCase__ , self.pa_type )
def _a ( self , _a ) -> int:
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(_a ):
with xopen(lowerCamelCase__ , """rb""" ) as f:
SCREAMING_SNAKE_CASE__ : List[str] = f.read()
return bytes_
SCREAMING_SNAKE_CASE__ : Optional[int] = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
SCREAMING_SNAKE_CASE__ : str = pa.array(
[os.path.basename(lowerCamelCase__ ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(lowerCamelCase__ , self.pa_type )
| 680 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : Optional[Any] = logging.get_logger(__name__)
snake_case_ : Any = {
'naver-clova-ix/donut-base': 'https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json',
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class lowercase__ ( snake_case_ ):
'''simple docstring'''
_snake_case = '''donut-swin'''
_snake_case = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , lowerCamelCase__=2_2_4 , lowerCamelCase__=4 , lowerCamelCase__=3 , lowerCamelCase__=9_6 , lowerCamelCase__=[2, 2, 6, 2] , lowerCamelCase__=[3, 6, 1_2, 2_4] , lowerCamelCase__=7 , lowerCamelCase__=4.0 , lowerCamelCase__=True , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.1 , lowerCamelCase__="gelu" , lowerCamelCase__=False , lowerCamelCase__=0.02 , lowerCamelCase__=1e-5 , **lowerCamelCase__ , ):
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
UpperCamelCase = image_size
UpperCamelCase = patch_size
UpperCamelCase = num_channels
UpperCamelCase = embed_dim
UpperCamelCase = depths
UpperCamelCase = len(lowerCamelCase__ )
UpperCamelCase = num_heads
UpperCamelCase = window_size
UpperCamelCase = mlp_ratio
UpperCamelCase = qkv_bias
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = drop_path_rate
UpperCamelCase = hidden_act
UpperCamelCase = use_absolute_embeddings
UpperCamelCase = layer_norm_eps
UpperCamelCase = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCamelCase = int(embed_dim * 2 ** (len(lowerCamelCase__ ) - 1) )
| 212 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
A_ : List[Any] =logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE_ ( snake_case : Union[str, Any] )-> Optional[Any]:
_lowerCamelCase = 'huggingface/label-files'
_lowerCamelCase = 'imagenet-1k-id2label.json'
_lowerCamelCase = json.load(open(hf_hub_download(snake_case , snake_case , repo_type='dataset' ) , 'r' ) )
_lowerCamelCase = {int(snake_case ): v for k, v in idalabel.items()}
_lowerCamelCase = {v: k for k, v in idalabel.items()}
_lowerCamelCase = 'std_conv' if 'bit' in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
_lowerCamelCase = BitConfig(
conv_layer=snake_case , num_labels=1_000 , idalabel=snake_case , labelaid=snake_case , )
return config
def SCREAMING_SNAKE_CASE_ ( snake_case : Union[str, Any] )-> str:
if "stem.conv" in name:
_lowerCamelCase = name.replace('stem.conv' , 'bit.embedder.convolution' )
if "blocks" in name:
_lowerCamelCase = name.replace('blocks' , 'layers' )
if "head.fc" in name:
_lowerCamelCase = name.replace('head.fc' , 'classifier.1' )
if name.startswith('norm' ):
_lowerCamelCase = 'bit.' + name
if "bit" not in name and "classifier" not in name:
_lowerCamelCase = 'bit.encoder.' + name
return name
def SCREAMING_SNAKE_CASE_ ( )-> Optional[Any]:
_lowerCamelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_lowerCamelCase = Image.open(requests.get(snake_case , stream=snake_case ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( snake_case : List[str] , snake_case : Optional[int] , snake_case : List[Any]=False )-> int:
_lowerCamelCase = get_config(snake_case )
# load original model from timm
_lowerCamelCase = create_model(snake_case , pretrained=snake_case )
timm_model.eval()
# load state_dict of original model
_lowerCamelCase = timm_model.state_dict()
for key in state_dict.copy().keys():
_lowerCamelCase = state_dict.pop(snake_case )
_lowerCamelCase = val.squeeze() if 'head' in key else val
# load HuggingFace model
_lowerCamelCase = BitForImageClassification(snake_case )
model.eval()
model.load_state_dict(snake_case )
# create image processor
_lowerCamelCase = create_transform(**resolve_data_config({} , model=snake_case ) )
_lowerCamelCase = transform.transforms
_lowerCamelCase = {
'bilinear': PILImageResampling.BILINEAR,
'bicubic': PILImageResampling.BICUBIC,
'nearest': PILImageResampling.NEAREST,
}
_lowerCamelCase = BitImageProcessor(
do_resize=snake_case , size={'shortest_edge': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=snake_case , crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]} , do_normalize=snake_case , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
_lowerCamelCase = prepare_img()
_lowerCamelCase = transform(snake_case ).unsqueeze(0 )
_lowerCamelCase = processor(snake_case , return_tensors='pt' ).pixel_values
# verify pixel values
assert torch.allclose(snake_case , snake_case )
# verify logits
with torch.no_grad():
_lowerCamelCase = model(snake_case )
_lowerCamelCase = outputs.logits
print('Logits:' , logits[0, :3] )
print('Predicted class:' , model.config.idalabel[logits.argmax(-1 ).item()] )
_lowerCamelCase = timm_model(snake_case )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(snake_case , outputs.logits , atol=1e-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
Path(snake_case ).mkdir(exist_ok=snake_case )
print(f'Saving model {model_name} and processor to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case )
processor.save_pretrained(snake_case )
if push_to_hub:
print(f'Pushing model {model_name} and processor to the hub' )
model.push_to_hub(f'ybelkada/{model_name}' )
processor.push_to_hub(f'ybelkada/{model_name}' )
if __name__ == "__main__":
A_ : Any =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""resnetv2_50x1_bitm""",
type=str,
help="""Name of the BiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model to the hub.""",
)
A_ : Union[str, Any] =parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 720 |
"""simple docstring"""
A_ : List[Any] =9.8_0665
def SCREAMING_SNAKE_CASE_ ( snake_case : float , snake_case : float , snake_case : float = g )-> float:
if fluid_density <= 0:
raise ValueError('Impossible fluid density' )
if volume < 0:
raise ValueError('Impossible Object volume' )
if gravity <= 0:
raise ValueError('Impossible Gravity' )
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
| 222 | 0 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class A__ ( A__ ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase__ : Any , lowerCamelCase__ : Any ):
a__ : str = params
a__ : Any = np.array(lowerCamelCase__ )
a__ : Dict = np.array([len(lowerCamelCase__ ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self : Optional[int] , lowerCamelCase__ : Union[str, Any] ):
return (self.token_ids[index], self.lengths[index])
def __len__( self : Any ):
return len(self.lengths )
def _UpperCamelCase( self : Dict ):
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def _UpperCamelCase( self : int ):
a__ : List[Any] = self.params.max_model_input_size
a__ : str = self.lengths > max_len
logger.info(f'''Splitting {sum(lowerCamelCase__ )} too long sequences.''' )
def divide_chunks(lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[int] ):
return [l[i : i + n] for i in range(0 , len(lowerCamelCase__ ) , lowerCamelCase__ )]
a__ : Union[str, Any] = []
a__ : str = []
if self.params.mlm:
a__, a__ : List[str] = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
else:
a__, a__ : Dict = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
a__ : Tuple = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
a__ : Optional[Any] = np.insert(lowerCamelCase__ , 0 , lowerCamelCase__ )
if sub_s[-1] != sep_id:
a__ : int = np.insert(lowerCamelCase__ , len(lowerCamelCase__ ) , lowerCamelCase__ )
assert len(lowerCamelCase__ ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(lowerCamelCase__ )
new_tok_ids.extend(lowerCamelCase__ )
new_lengths.extend([len(lowerCamelCase__ ) for l in sub_seqs] )
a__ : Dict = np.array(lowerCamelCase__ )
a__ : Union[str, Any] = np.array(lowerCamelCase__ )
def _UpperCamelCase( self : str ):
a__ : Tuple = len(self )
a__ : str = self.lengths > 11
a__ : Union[str, Any] = self.token_ids[indices]
a__ : List[Any] = self.lengths[indices]
a__ : Tuple = len(self )
logger.info(f'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' )
def _UpperCamelCase( self : str ):
if "unk_token" not in self.params.special_tok_ids:
return
else:
a__ : Tuple = self.params.special_tok_ids["unk_token"]
a__ : Optional[int] = len(self )
a__ : str = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
a__ : Any = (unk_occs / self.lengths) < 0.5
a__ : str = self.token_ids[indices]
a__ : Optional[int] = self.lengths[indices]
a__ : List[Any] = len(self )
logger.info(f'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' )
def _UpperCamelCase( self : Dict ):
if not self.params.is_master:
return
logger.info(f'''{len(self )} sequences''' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def _UpperCamelCase( self : Dict , lowerCamelCase__ : Tuple ):
a__ : str = [t[0] for t in batch]
a__ : Any = [t[1] for t in batch]
assert len(lowerCamelCase__ ) == len(lowerCamelCase__ )
# Max for paddings
a__ : Dict = max(lowerCamelCase__ )
# Pad token ids
if self.params.mlm:
a__ : Optional[int] = self.params.special_tok_ids["pad_token"]
else:
a__ : int = self.params.special_tok_ids["unk_token"]
a__ : str = [list(t.astype(lowerCamelCase__ ) ) + [pad_idx] * (max_seq_len_ - len(lowerCamelCase__ )) for t in token_ids]
assert len(tk_ ) == len(lowerCamelCase__ )
assert all(len(lowerCamelCase__ ) == max_seq_len_ for t in tk_ )
a__ : Optional[int] = torch.tensor(tk_ ) # (bs, max_seq_len_)
a__ : Optional[int] = torch.tensor(lowerCamelCase__ ) # (bs)
return tk_t, lg_t
| 37 |
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def _UpperCAmelCase ( A , A , A , A=1024 ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ =[], []
UpperCAmelCase__ =list(zip(A , A ) )
UpperCAmelCase__ , UpperCAmelCase__ =sorted_examples[0]
def is_too_big(A ):
return tok(A , return_tensors="pt" ).input_ids.shape[1] > max_tokens
for src, tgt in tqdm(sorted_examples[1:] ):
UpperCAmelCase__ =new_src + " " + src
UpperCAmelCase__ =new_tgt + " " + tgt
if is_too_big(A ) or is_too_big(A ): # cant fit, finalize example
finished_src.append(A )
finished_tgt.append(A )
UpperCAmelCase__ , UpperCAmelCase__ =src, tgt
else: # can fit, keep adding
UpperCAmelCase__ , UpperCAmelCase__ =cand_src, cand_tgt
# cleanup
if new_src:
assert new_tgt
finished_src.append(A )
finished_tgt.append(A )
return finished_src, finished_tgt
def _UpperCAmelCase ( A , A , A , A ):
'''simple docstring'''
UpperCAmelCase__ =Path(A )
save_path.mkdir(exist_ok=A )
for split in ["train"]:
UpperCAmelCase__ , UpperCAmelCase__ =data_dir / F"""{split}.source""", data_dir / F"""{split}.target"""
UpperCAmelCase__ =[x.rstrip() for x in Path(A ).open().readlines()]
UpperCAmelCase__ =[x.rstrip() for x in Path(A ).open().readlines()]
UpperCAmelCase__ , UpperCAmelCase__ =pack_examples(A , A , A , A )
print(F"""packed {split} split from {len(A )} examples -> {len(A )}.""" )
Path(save_path / F"""{split}.source""" ).open("w" ).write("\n".join(A ) )
Path(save_path / F"""{split}.target""" ).open("w" ).write("\n".join(A ) )
for split in ["val", "test"]:
UpperCAmelCase__ , UpperCAmelCase__ =data_dir / F"""{split}.source""", data_dir / F"""{split}.target"""
shutil.copyfile(A , save_path / F"""{split}.source""" )
shutil.copyfile(A , save_path / F"""{split}.target""" )
def _UpperCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase__ =argparse.ArgumentParser()
parser.add_argument("--tok_name" , type=A , help="like facebook/bart-large-cnn,t5-base, etc." )
parser.add_argument("--max_seq_len" , type=A , default=128 )
parser.add_argument("--data_dir" , type=A )
parser.add_argument("--save_path" , type=A )
UpperCAmelCase__ =parser.parse_args()
UpperCAmelCase__ =AutoTokenizer.from_pretrained(args.tok_name )
return pack_data_dir(A , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli()
| 625 | 0 |
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
lowercase__ = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def __magic_name__ ( _lowerCamelCase : List[Any] , _lowerCamelCase : int , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : int , _lowerCamelCase : List[str] , _lowerCamelCase : int ):
if got_ver is None or want_ver is None:
raise ValueError(
F'''Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider'''
F''' reinstalling {pkg}.''' )
if not ops[op](version.parse(_lowerCamelCase ) , version.parse(_lowerCamelCase ) ):
raise ImportError(
F'''{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}''' )
def __magic_name__ ( _lowerCamelCase : str , _lowerCamelCase : Optional[str] = None ):
__a : Optional[int] = F'''\n{hint}''' if hint is not None else """"""
# non-versioned check
if re.match(r"""^[\w_\-\d]+$""" , _lowerCamelCase ):
__a : str = requirement, None, None
else:
__a : Dict = re.findall(r"""^([^!=<>\s]+)([\s!=<>]{1,2}.+)""" , _lowerCamelCase )
if not match:
raise ValueError(
"""requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"""
F''' got {requirement}''' )
__a : Union[str, Any] = match[0]
__a : Dict = want_full.split(""",""" ) # there could be multiple requirements
__a : List[str] = {}
for w in want_range:
__a : Dict = re.findall(r"""^([\s!=<>]{1,2})(.+)""" , _lowerCamelCase )
if not match:
raise ValueError(
"""requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"""
F''' but got {requirement}''' )
__a : Union[str, Any] = match[0]
__a : Any = want_ver
if op not in ops:
raise ValueError(F'''{requirement}: need one of {list(ops.keys() )}, but got {op}''' )
# special case
if pkg == "python":
__a : Optional[Any] = """.""".join([str(_lowerCamelCase ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
return
# check if any version is installed
try:
__a : Any = importlib.metadata.version(_lowerCamelCase )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
F'''The \'{requirement}\' distribution was not found and is required by this application. {hint}''' )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def __magic_name__ ( _lowerCamelCase : int ):
__a : Optional[int] = """Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"""
return require_version(_lowerCamelCase , _lowerCamelCase )
| 706 |
"""simple docstring"""
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
lowercase__ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
_lowerCAmelCase = "linear"
_lowerCAmelCase = "cosine"
_lowerCAmelCase = "cosine_with_restarts"
_lowerCAmelCase = "polynomial"
_lowerCAmelCase = "constant"
_lowerCAmelCase = "constant_with_warmup"
_lowerCAmelCase = "piecewise_constant"
def __magic_name__ ( _lowerCamelCase : Optimizer , _lowerCamelCase : int = -1 ):
return LambdaLR(_lowerCamelCase , lambda _lowerCamelCase : 1 , last_epoch=_lowerCamelCase )
def __magic_name__ ( _lowerCamelCase : Optimizer , _lowerCamelCase : int , _lowerCamelCase : int = -1 ):
def lr_lambda(_lowerCamelCase : int ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1.0 , _lowerCamelCase ) )
return 1.0
return LambdaLR(_lowerCamelCase , _lowerCamelCase , last_epoch=_lowerCamelCase )
def __magic_name__ ( _lowerCamelCase : Optimizer , _lowerCamelCase : str , _lowerCamelCase : int = -1 ):
__a : Optional[int] = {}
__a : Any = step_rules.split(""",""" )
for rule_str in rule_list[:-1]:
__a , __a : int = rule_str.split(""":""" )
__a : Optional[int] = int(_lowerCamelCase )
__a : str = float(_lowerCamelCase )
__a : int = value
__a : Dict = float(rule_list[-1] )
def create_rules_function(_lowerCamelCase : str , _lowerCamelCase : Tuple ):
def rule_func(_lowerCamelCase : int ) -> float:
__a : Optional[Any] = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(_lowerCamelCase ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
__a : Optional[int] = create_rules_function(_lowerCamelCase , _lowerCamelCase )
return LambdaLR(_lowerCamelCase , _lowerCamelCase , last_epoch=_lowerCamelCase )
def __magic_name__ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Any , _lowerCamelCase : List[str] , _lowerCamelCase : str=-1 ):
def lr_lambda(_lowerCamelCase : int ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1 , _lowerCamelCase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def __magic_name__ ( _lowerCamelCase : Optimizer , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : float = 0.5 , _lowerCamelCase : int = -1 ):
def lr_lambda(_lowerCamelCase : Any ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1 , _lowerCamelCase ) )
__a : Dict = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(_lowerCamelCase ) * 2.0 * progress )) )
return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def __magic_name__ ( _lowerCamelCase : Optimizer , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int = 1 , _lowerCamelCase : int = -1 ):
def lr_lambda(_lowerCamelCase : Optional[int] ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1 , _lowerCamelCase ) )
__a : Dict = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(_lowerCamelCase ) * progress) % 1.0) )) )
return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def __magic_name__ ( _lowerCamelCase : Any , _lowerCamelCase : Any , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any]=1E-7 , _lowerCamelCase : Optional[int]=1.0 , _lowerCamelCase : Optional[int]=-1 ):
__a : Union[str, Any] = optimizer.defaults["""lr"""]
if not (lr_init > lr_end):
raise ValueError(F'''lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})''' )
def lr_lambda(_lowerCamelCase : int ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1 , _lowerCamelCase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
__a : Tuple = lr_init - lr_end
__a : int = num_training_steps - num_warmup_steps
__a : Optional[int] = 1 - (current_step - num_warmup_steps) / decay_steps
__a : List[str] = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
lowercase__ = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __magic_name__ ( _lowerCamelCase : Union[str, SchedulerType] , _lowerCamelCase : Optimizer , _lowerCamelCase : Optional[str] = None , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : int = 1 , _lowerCamelCase : float = 1.0 , _lowerCamelCase : int = -1 , ):
__a : int = SchedulerType(_lowerCamelCase )
__a : Optional[int] = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(_lowerCamelCase , last_epoch=_lowerCamelCase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(_lowerCamelCase , step_rules=_lowerCamelCase , last_epoch=_lowerCamelCase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F'''{name} requires `num_warmup_steps`, please provide that argument.''' )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(_lowerCamelCase , num_warmup_steps=_lowerCamelCase , last_epoch=_lowerCamelCase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F'''{name} requires `num_training_steps`, please provide that argument.''' )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
_lowerCamelCase , num_warmup_steps=_lowerCamelCase , num_training_steps=_lowerCamelCase , num_cycles=_lowerCamelCase , last_epoch=_lowerCamelCase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
_lowerCamelCase , num_warmup_steps=_lowerCamelCase , num_training_steps=_lowerCamelCase , power=_lowerCamelCase , last_epoch=_lowerCamelCase , )
return schedule_func(
_lowerCamelCase , num_warmup_steps=_lowerCamelCase , num_training_steps=_lowerCamelCase , last_epoch=_lowerCamelCase )
| 63 | 0 |
'''simple docstring'''
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=13 ,_lowerCAmelCase=32 ,_lowerCAmelCase=2 ,_lowerCAmelCase=3 ,_lowerCAmelCase=16 ,_lowerCAmelCase=[1, 2, 1] ,_lowerCAmelCase=[2, 2, 4] ,_lowerCAmelCase=2 ,_lowerCAmelCase=2.0 ,_lowerCAmelCase=True ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=False ,_lowerCAmelCase=True ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=1E-5 ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase=10 ,_lowerCAmelCase=8 ,_lowerCAmelCase=["stage1", "stage2", "stage3"] ,_lowerCAmelCase=[1, 2, 3] ,):
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = image_size
lowerCamelCase__ = patch_size
lowerCamelCase__ = num_channels
lowerCamelCase__ = embed_dim
lowerCamelCase__ = depths
lowerCamelCase__ = num_heads
lowerCamelCase__ = window_size
lowerCamelCase__ = mlp_ratio
lowerCamelCase__ = qkv_bias
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = drop_path_rate
lowerCamelCase__ = hidden_act
lowerCamelCase__ = use_absolute_embeddings
lowerCamelCase__ = patch_norm
lowerCamelCase__ = layer_norm_eps
lowerCamelCase__ = initializer_range
lowerCamelCase__ = is_training
lowerCamelCase__ = scope
lowerCamelCase__ = use_labels
lowerCamelCase__ = type_sequence_label_size
lowerCamelCase__ = encoder_stride
lowerCamelCase__ = out_features
lowerCamelCase__ = out_indices
def UpperCamelCase_ ( self ):
lowerCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ = None
if self.use_labels:
lowerCamelCase__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowerCamelCase__ = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self ):
return MaskFormerSwinConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,out_features=self.out_features ,out_indices=self.out_indices ,)
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = MaskFormerSwinModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowerCamelCase__ = model(_lowerCAmelCase )
lowerCamelCase__ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowerCamelCase__ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = MaskFormerSwinBackbone(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowerCamelCase__ = model(_lowerCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
self.parent.assertListEqual(model.channels ,[16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(_lowerCAmelCase ):
lowerCamelCase__ = ["""stem"""]
lowerCamelCase__ = MaskFormerSwinBackbone(config=_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = config_and_inputs
lowerCamelCase__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase__ (a ,a ,unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
_UpperCamelCase = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {}
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
def UpperCamelCase_ ( self ):
lowerCamelCase__ = MaskFormerSwinModelTester(self )
lowerCamelCase__ = ConfigTester(self ,config_class=_lowerCAmelCase ,embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
def UpperCamelCase_ ( self ):
pass
def UpperCamelCase_ ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase_ ( self ):
return
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_lowerCAmelCase )
@unittest.skip("""Swin does not use inputs_embeds""" )
def UpperCamelCase_ ( self ):
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def UpperCamelCase_ ( self ):
pass
def UpperCamelCase_ ( self ):
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(_lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
lowerCamelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase ,nn.Linear ) )
def UpperCamelCase_ ( self ):
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(_lowerCAmelCase )
lowerCamelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ = [*signature.parameters.keys()]
lowerCamelCase__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,_lowerCAmelCase )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def UpperCamelCase_ ( self ):
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def UpperCamelCase_ ( self ):
pass
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
lowerCamelCase__ = model(**self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) )
lowerCamelCase__ = outputs.hidden_states
lowerCamelCase__ = getattr(
self.model_tester ,"""expected_num_hidden_layers""" ,len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_lowerCAmelCase ) ,_lowerCAmelCase )
# Swin has a different seq_length
lowerCamelCase__ = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
def UpperCamelCase_ ( self ):
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowerCamelCase__ = True
self.check_hidden_states_output(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__ = True
self.check_hidden_states_output(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ = 3
lowerCamelCase__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCamelCase__ = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase__ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCamelCase__ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowerCamelCase__ = True
self.check_hidden_states_output(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__ = True
self.check_hidden_states_output(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,(padded_height, padded_width) )
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def UpperCamelCase_ ( self ):
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def UpperCamelCase_ ( self ):
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def UpperCamelCase_ ( self ):
pass
def UpperCamelCase_ ( self ):
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(_lowerCAmelCase ):
lowerCamelCase__ = 0
return t
def check_equivalence(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase={} ):
with torch.no_grad():
lowerCamelCase__ = model(**_lowerCAmelCase ,return_dict=_lowerCAmelCase ,**_lowerCAmelCase )
lowerCamelCase__ = model(**_lowerCAmelCase ,return_dict=_lowerCAmelCase ,**_lowerCAmelCase ).to_tuple()
def recursive_check(_lowerCAmelCase ,_lowerCAmelCase ):
if isinstance(_lowerCAmelCase ,(List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_lowerCAmelCase ,_lowerCAmelCase ):
recursive_check(_lowerCAmelCase ,_lowerCAmelCase )
elif isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() ,dict_object.values() ):
recursive_check(_lowerCAmelCase ,_lowerCAmelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(_lowerCAmelCase ) ,set_nan_tensor_to_zero(_lowerCAmelCase ) ,atol=1E-5 ) ,msg=(
"""Tuple and dict output are not equal. Difference:"""
F''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'''
F''' {torch.isnan(_lowerCAmelCase ).any()} and `inf`: {torch.isinf(_lowerCAmelCase )}. Dict has'''
F''' `nan`: {torch.isnan(_lowerCAmelCase ).any()} and `inf`: {torch.isinf(_lowerCAmelCase )}.'''
) ,)
recursive_check(_lowerCAmelCase ,_lowerCAmelCase )
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase )
check_equivalence(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ,return_labels=_lowerCAmelCase )
lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ,return_labels=_lowerCAmelCase )
check_equivalence(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase )
check_equivalence(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,{"""output_hidden_states""": True} )
lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ,return_labels=_lowerCAmelCase )
lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ,return_labels=_lowerCAmelCase )
check_equivalence(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,{"""output_hidden_states""": True} )
@require_torch
class UpperCamelCase__ (unittest.TestCase ,a ):
'''simple docstring'''
_UpperCamelCase = (MaskFormerSwinBackbone,) if is_torch_available() else ()
_UpperCamelCase = MaskFormerSwinConfig
def UpperCamelCase_ ( self ):
lowerCamelCase__ = MaskFormerSwinModelTester(self )
def UpperCamelCase_ ( self ):
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ = inputs_dict["""pixel_values"""].shape[0]
for backbone_class in self.all_model_classes:
lowerCamelCase__ = backbone_class(_lowerCAmelCase )
backbone.to(_lowerCAmelCase )
backbone.eval()
lowerCamelCase__ = backbone(**_lowerCAmelCase )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps ,_lowerCAmelCase )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps ,backbone.channels ):
self.assertTrue(feature_map.shape[:2] ,(batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
lowerCamelCase__ = backbone(**_lowerCAmelCase ,output_hidden_states=_lowerCAmelCase )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) ,len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] ,backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) ,(batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
lowerCamelCase__ = backbone(**_lowerCAmelCase ,output_attentions=_lowerCAmelCase )
self.assertIsNotNone(outputs.attentions )
| 50 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_SCREAMING_SNAKE_CASE : Optional[int] = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[int] = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Dict = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
_SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 550 | 0 |
"""simple docstring"""
import copy
import random
from transformers import CLIPTokenizer
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
def __init__( self :Optional[int] , *lowerCamelCase_ :Union[str, Any] , **lowerCamelCase_ :str ) -> Optional[int]:
'''simple docstring'''
super().__init__(*lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = {}
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :List[Any] , *lowerCamelCase_ :int , **lowerCamelCase_ :Union[str, Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = super().add_tokens(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ )
if num_added_tokens == 0:
raise ValueError(
f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
''' `placeholder_token` that is not already in the tokenizer.''' )
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :List[Any] , *lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :str=1 , **lowerCamelCase_ :Tuple ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = []
if num_vec_per_token == 1:
self.try_adding_tokens(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ )
output.append(lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : Dict = []
for i in range(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Optional[int] = placeholder_token + f"_{i}"
self.try_adding_tokens(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ )
output.append(lowerCamelCase_ )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
f"The tokenizer already has placeholder token {token} that can get confused with"
f" {placeholder_token}keep placeholder tokens independent" )
SCREAMING_SNAKE_CASE : str = output
def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Tuple=False , lowerCamelCase_ :Optional[int]=1.0 ) -> Any:
'''simple docstring'''
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Optional[int] = []
for i in range(len(lowerCamelCase_ ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=lowerCamelCase_ ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
SCREAMING_SNAKE_CASE : List[Any] = self.token_map[placeholder_token]
SCREAMING_SNAKE_CASE : List[str] = tokens[: 1 + int(len(lowerCamelCase_ ) * prop_tokens_to_load )]
if vector_shuffle:
SCREAMING_SNAKE_CASE : Any = copy.copy(lowerCamelCase_ )
random.shuffle(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = text.replace(lowerCamelCase_ , ''' '''.join(lowerCamelCase_ ) )
return text
def __call__( self :Any , lowerCamelCase_ :Union[str, Any] , *lowerCamelCase_ :str , lowerCamelCase_ :Optional[int]=False , lowerCamelCase_ :Tuple=1.0 , **lowerCamelCase_ :Tuple ) -> Any:
'''simple docstring'''
return super().__call__(
self.replace_placeholder_tokens_in_text(
lowerCamelCase_ , vector_shuffle=lowerCamelCase_ , prop_tokens_to_load=lowerCamelCase_ ) , *lowerCamelCase_ , **lowerCamelCase_ , )
def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :str , *lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Optional[Any]=False , lowerCamelCase_ :List[str]=1.0 , **lowerCamelCase_ :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
return super().encode(
self.replace_placeholder_tokens_in_text(
lowerCamelCase_ , vector_shuffle=lowerCamelCase_ , prop_tokens_to_load=lowerCamelCase_ ) , *lowerCamelCase_ , **lowerCamelCase_ , )
| 18 |
"""simple docstring"""
import math
def __A ( a_ : list , a_ : int )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = len(a_ )
SCREAMING_SNAKE_CASE : Optional[Any] = int(math.floor(math.sqrt(a_ ) ) )
SCREAMING_SNAKE_CASE : List[str] = 0
while arr[min(a_ , a_ ) - 1] < x:
SCREAMING_SNAKE_CASE : Optional[Any] = step
step += int(math.floor(math.sqrt(a_ ) ) )
if prev >= n:
return -1
while arr[prev] < x:
SCREAMING_SNAKE_CASE : Any = prev + 1
if prev == min(a_ , a_ ):
return -1
if arr[prev] == x:
return prev
return -1
if __name__ == "__main__":
lowerCamelCase__ : Union[str, Any] = input("Enter numbers separated by a comma:\n").strip()
lowerCamelCase__ : List[str] = [int(item) for item in user_input.split(",")]
lowerCamelCase__ : Dict = int(input("Enter the number to be searched:\n"))
lowerCamelCase__ : Tuple = jump_search(arr, x)
if res == -1:
print("Number not found!")
else:
print(f'''Number {x} is at index {res}''')
| 18 | 1 |
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
snake_case_ : List[str] = logging.getLogger(__name__)
def __snake_case ( ):
UpperCamelCase = argparse.ArgumentParser(
description='''Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).''')
parser.add_argument('''--file_path''', type=_UpperCAmelCase, default='''data/dump.txt''', help='''The path to the data.''')
parser.add_argument('''--tokenizer_type''', type=_UpperCAmelCase, default='''bert''', choices=['''bert''', '''roberta''', '''gpt2'''])
parser.add_argument('''--tokenizer_name''', type=_UpperCAmelCase, default='''bert-base-uncased''', help='''The tokenizer to use.''')
parser.add_argument('''--dump_file''', type=_UpperCAmelCase, default='''data/dump''', help='''The dump file prefix.''')
UpperCamelCase = parser.parse_args()
logger.info(f'Loading Tokenizer ({args.tokenizer_name})')
if args.tokenizer_type == "bert":
UpperCamelCase = BertTokenizer.from_pretrained(args.tokenizer_name)
UpperCamelCase = tokenizer.special_tokens_map['''cls_token'''] # `[CLS]`
UpperCamelCase = tokenizer.special_tokens_map['''sep_token'''] # `[SEP]`
elif args.tokenizer_type == "roberta":
UpperCamelCase = RobertaTokenizer.from_pretrained(args.tokenizer_name)
UpperCamelCase = tokenizer.special_tokens_map['''cls_token'''] # `<s>`
UpperCamelCase = tokenizer.special_tokens_map['''sep_token'''] # `</s>`
elif args.tokenizer_type == "gpt2":
UpperCamelCase = GPTaTokenizer.from_pretrained(args.tokenizer_name)
UpperCamelCase = tokenizer.special_tokens_map['''bos_token'''] # `<|endoftext|>`
UpperCamelCase = tokenizer.special_tokens_map['''eos_token'''] # `<|endoftext|>`
logger.info(f'Loading text from {args.file_path}')
with open(args.file_path, '''r''', encoding='''utf8''') as fp:
UpperCamelCase = fp.readlines()
logger.info('''Start encoding''')
logger.info(f'{len(_UpperCAmelCase)} examples to process.')
UpperCamelCase = []
UpperCamelCase = 0
UpperCamelCase = 1_0000
UpperCamelCase = time.time()
for text in data:
UpperCamelCase = f'{bos} {text.strip()} {sep}'
UpperCamelCase = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase)
rslt.append(_UpperCAmelCase)
iter += 1
if iter % interval == 0:
UpperCamelCase = time.time()
logger.info(f'{iter} examples processed. - {(end-start):.2f}s/{interval}expl')
UpperCamelCase = time.time()
logger.info('''Finished binarization''')
logger.info(f'{len(_UpperCAmelCase)} examples processed.')
UpperCamelCase = f'{args.dump_file}.{args.tokenizer_name}.pickle'
UpperCamelCase = tokenizer.vocab_size
if vocab_size < (1 << 16):
UpperCamelCase = [np.uintaa(_UpperCAmelCase) for d in rslt]
else:
UpperCamelCase = [np.intaa(_UpperCAmelCase) for d in rslt]
random.shuffle(rslt_)
logger.info(f'Dump to {dp_file}')
with open(_UpperCAmelCase, '''wb''') as handle:
pickle.dump(rslt_, _UpperCAmelCase, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
| 212 |
'''simple docstring'''
def __snake_case ( _UpperCAmelCase : int, _UpperCAmelCase : str):
UpperCamelCase = ''''''
for i in table:
res += inp[i - 1]
return res
def __snake_case ( _UpperCAmelCase : Dict):
return data[1:] + data[0]
def __snake_case ( _UpperCAmelCase : Dict, _UpperCAmelCase : str):
UpperCamelCase = ''''''
for i in range(len(_UpperCAmelCase)):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def __snake_case ( _UpperCAmelCase : List[str], _UpperCAmelCase : Dict):
UpperCamelCase = int('''0b''' + data[0] + data[-1], 2)
UpperCamelCase = int('''0b''' + data[1:3], 2)
return bin(s[row][col])[2:]
def __snake_case ( _UpperCAmelCase : Dict, _UpperCAmelCase : Optional[Any], _UpperCAmelCase : Optional[Any], _UpperCAmelCase : List[str], _UpperCAmelCase : Optional[int]):
UpperCamelCase = message[:4]
UpperCamelCase = message[4:]
UpperCamelCase = apply_table(_UpperCAmelCase, _UpperCAmelCase)
UpperCamelCase = xor(_UpperCAmelCase, _UpperCAmelCase)
UpperCamelCase = apply_sbox(_UpperCAmelCase, temp[:4]) # noqa: E741
UpperCamelCase = apply_sbox(_UpperCAmelCase, temp[4:])
UpperCamelCase = '''0''' * (2 - len(_UpperCAmelCase)) + l # noqa: E741
UpperCamelCase = '''0''' * (2 - len(_UpperCAmelCase)) + r
UpperCamelCase = apply_table(l + r, _UpperCAmelCase)
UpperCamelCase = xor(_UpperCAmelCase, _UpperCAmelCase)
return temp + right
if __name__ == "__main__":
snake_case_ : List[Any] = input('Enter 10 bit key: ')
snake_case_ : Union[str, Any] = input('Enter 8 bit message: ')
snake_case_ : List[Any] = [6, 3, 7, 4, 8, 5, 10, 9]
snake_case_ : Dict = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
snake_case_ : Tuple = [2, 4, 3, 1]
snake_case_ : List[str] = [2, 6, 3, 1, 4, 8, 5, 7]
snake_case_ : Optional[int] = [4, 1, 3, 5, 7, 2, 8, 6]
snake_case_ : str = [4, 1, 2, 3, 2, 3, 4, 1]
snake_case_ : Optional[int] = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
snake_case_ : int = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
snake_case_ : Union[str, Any] = apply_table(key, paa_table)
snake_case_ : Optional[int] = temp[:5]
snake_case_ : str = temp[5:]
snake_case_ : str = left_shift(left)
snake_case_ : Dict = left_shift(right)
snake_case_ : List[Any] = apply_table(left + right, pa_table)
snake_case_ : Union[str, Any] = left_shift(left)
snake_case_ : Union[str, Any] = left_shift(right)
snake_case_ : str = left_shift(left)
snake_case_ : Tuple = left_shift(right)
snake_case_ : List[str] = apply_table(left + right, pa_table)
# encryption
snake_case_ : Any = apply_table(message, IP)
snake_case_ : Union[str, Any] = function(expansion, sa, sa, keya, temp)
snake_case_ : int = temp[4:] + temp[:4]
snake_case_ : List[str] = function(expansion, sa, sa, keya, temp)
snake_case_ : Dict = apply_table(temp, IP_inv)
print('Cipher text is:', CT)
# decryption
snake_case_ : List[Any] = apply_table(CT, IP)
snake_case_ : int = function(expansion, sa, sa, keya, temp)
snake_case_ : List[Any] = temp[4:] + temp[:4]
snake_case_ : int = function(expansion, sa, sa, keya, temp)
snake_case_ : Tuple = apply_table(temp, IP_inv)
print('Plain text after decypting is:', PT)
| 212 | 1 |
'''simple docstring'''
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class a_ ( UpperCamelCase__ , unittest.TestCase ):
lowerCamelCase__ : str = DownBlockaD # noqa F405
lowerCamelCase__ : Any = 'down'
def lowerCAmelCase__ ( self ):
a_ = [-0.02_32, -0.98_69, 0.80_54, -0.06_37, -0.16_88, -1.42_64, 0.44_70, -1.33_94, 0.09_04]
super().test_output(UpperCAmelCase )
class a_ ( UpperCamelCase__ , unittest.TestCase ):
lowerCamelCase__ : Optional[Any] = ResnetDownsampleBlockaD # noqa F405
lowerCamelCase__ : List[Any] = 'down'
def lowerCAmelCase__ ( self ):
a_ = [0.07_10, 0.24_10, -0.73_20, -1.07_57, -1.13_43, 0.35_40, -0.01_33, -0.25_76, 0.09_48]
super().test_output(UpperCAmelCase )
class a_ ( UpperCamelCase__ , unittest.TestCase ):
lowerCamelCase__ : Optional[Any] = AttnDownBlockaD # noqa F405
lowerCamelCase__ : Any = 'down'
def lowerCAmelCase__ ( self ):
a_ = [0.06_36, 0.89_64, -0.62_34, -1.01_31, 0.08_44, 0.49_35, 0.34_37, 0.09_11, -0.29_57]
super().test_output(UpperCAmelCase )
class a_ ( UpperCamelCase__ , unittest.TestCase ):
lowerCamelCase__ : Tuple = CrossAttnDownBlockaD # noqa F405
lowerCamelCase__ : Any = 'down'
def lowerCAmelCase__ ( self ):
a_ , a_ = super().prepare_init_args_and_inputs_for_common()
a_ = 32
return init_dict, inputs_dict
def lowerCAmelCase__ ( self ):
a_ = [0.22_38, -0.73_96, -0.22_55, -0.38_29, 0.19_25, 1.16_65, 0.06_03, -0.72_95, 0.19_83]
super().test_output(UpperCAmelCase )
class a_ ( UpperCamelCase__ , unittest.TestCase ):
lowerCamelCase__ : Optional[int] = SimpleCrossAttnDownBlockaD # noqa F405
lowerCamelCase__ : Dict = 'down'
@property
def lowerCAmelCase__ ( self ):
return super().get_dummy_input(include_encoder_hidden_states=UpperCAmelCase )
def lowerCAmelCase__ ( self ):
a_ , a_ = super().prepare_init_args_and_inputs_for_common()
a_ = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def lowerCAmelCase__ ( self ):
a_ = [0.79_21, -0.09_92, -0.19_62, -0.76_95, -0.42_42, 0.78_04, 0.47_37, 0.27_65, 0.33_38]
super().test_output(UpperCAmelCase )
class a_ ( UpperCamelCase__ , unittest.TestCase ):
lowerCamelCase__ : Optional[Any] = SkipDownBlockaD # noqa F405
lowerCamelCase__ : int = 'down'
@property
def lowerCAmelCase__ ( self ):
return super().get_dummy_input(include_skip_sample=UpperCAmelCase )
def lowerCAmelCase__ ( self ):
a_ = [-0.08_45, -0.20_87, -0.24_65, 0.09_71, 0.19_00, -0.04_84, 0.26_64, 0.41_79, 0.50_69]
super().test_output(UpperCAmelCase )
class a_ ( UpperCamelCase__ , unittest.TestCase ):
lowerCamelCase__ : Union[str, Any] = AttnSkipDownBlockaD # noqa F405
lowerCamelCase__ : Union[str, Any] = 'down'
@property
def lowerCAmelCase__ ( self ):
return super().get_dummy_input(include_skip_sample=UpperCAmelCase )
def lowerCAmelCase__ ( self ):
a_ = [0.55_39, 0.16_09, 0.49_24, 0.05_37, -0.19_95, 0.40_50, 0.09_79, -0.27_21, -0.06_42]
super().test_output(UpperCAmelCase )
class a_ ( UpperCamelCase__ , unittest.TestCase ):
lowerCamelCase__ : List[str] = DownEncoderBlockaD # noqa F405
lowerCamelCase__ : List[Any] = 'down'
@property
def lowerCAmelCase__ ( self ):
return super().get_dummy_input(include_temb=UpperCAmelCase )
def lowerCAmelCase__ ( self ):
a_ = {
"""in_channels""": 32,
"""out_channels""": 32,
}
a_ = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase__ ( self ):
a_ = [1.11_02, 0.53_02, 0.48_72, -0.00_23, -0.80_42, 0.04_83, -0.34_89, -0.56_32, 0.76_26]
super().test_output(UpperCAmelCase )
class a_ ( UpperCamelCase__ , unittest.TestCase ):
lowerCamelCase__ : Union[str, Any] = AttnDownEncoderBlockaD # noqa F405
lowerCamelCase__ : Union[str, Any] = 'down'
@property
def lowerCAmelCase__ ( self ):
return super().get_dummy_input(include_temb=UpperCAmelCase )
def lowerCAmelCase__ ( self ):
a_ = {
"""in_channels""": 32,
"""out_channels""": 32,
}
a_ = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase__ ( self ):
a_ = [0.89_66, -0.14_86, 0.85_68, 0.81_41, -0.90_46, -0.13_42, -0.09_72, -0.74_17, 0.15_38]
super().test_output(UpperCAmelCase )
class a_ ( UpperCamelCase__ , unittest.TestCase ):
lowerCamelCase__ : Tuple = UNetMidBlockaD # noqa F405
lowerCamelCase__ : Optional[Any] = 'mid'
def lowerCAmelCase__ ( self ):
a_ = {
"""in_channels""": 32,
"""temb_channels""": 1_28,
}
a_ = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase__ ( self ):
a_ = [-0.10_62, 1.72_48, 0.34_94, 1.45_69, -0.09_10, -1.24_21, -0.99_84, 0.67_36, 1.00_28]
super().test_output(UpperCAmelCase )
class a_ ( UpperCamelCase__ , unittest.TestCase ):
lowerCamelCase__ : Optional[int] = UNetMidBlockaDCrossAttn # noqa F405
lowerCamelCase__ : Tuple = 'mid'
def lowerCAmelCase__ ( self ):
a_ , a_ = super().prepare_init_args_and_inputs_for_common()
a_ = 32
return init_dict, inputs_dict
def lowerCAmelCase__ ( self ):
a_ = [0.01_87, 2.42_20, 0.44_84, 1.12_03, -0.61_21, -1.51_22, -0.82_70, 0.78_51, 1.83_35]
super().test_output(UpperCAmelCase )
class a_ ( UpperCamelCase__ , unittest.TestCase ):
lowerCamelCase__ : str = UNetMidBlockaDSimpleCrossAttn # noqa F405
lowerCamelCase__ : Dict = 'mid'
@property
def lowerCAmelCase__ ( self ):
return super().get_dummy_input(include_encoder_hidden_states=UpperCAmelCase )
def lowerCAmelCase__ ( self ):
a_ , a_ = super().prepare_init_args_and_inputs_for_common()
a_ = 32
return init_dict, inputs_dict
def lowerCAmelCase__ ( self ):
a_ = [0.71_43, 1.99_74, 0.54_48, 1.39_77, 0.12_82, -1.12_37, -1.42_38, 0.55_30, 0.88_80]
super().test_output(UpperCAmelCase )
class a_ ( UpperCamelCase__ , unittest.TestCase ):
lowerCamelCase__ : List[str] = UpBlockaD # noqa F405
lowerCamelCase__ : List[str] = 'up'
@property
def lowerCAmelCase__ ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCAmelCase )
def lowerCAmelCase__ ( self ):
a_ = [-0.20_41, -0.41_65, -0.30_22, 0.00_41, -0.66_28, -0.70_53, 0.19_28, -0.03_25, 0.05_23]
super().test_output(UpperCAmelCase )
class a_ ( UpperCamelCase__ , unittest.TestCase ):
lowerCamelCase__ : Tuple = ResnetUpsampleBlockaD # noqa F405
lowerCamelCase__ : Any = 'up'
@property
def lowerCAmelCase__ ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCAmelCase )
def lowerCAmelCase__ ( self ):
a_ = [0.22_87, 0.35_49, -0.13_46, 0.47_97, -0.17_15, -0.96_49, 0.73_05, -0.58_64, -0.62_44]
super().test_output(UpperCAmelCase )
class a_ ( UpperCamelCase__ , unittest.TestCase ):
lowerCamelCase__ : Optional[int] = CrossAttnUpBlockaD # noqa F405
lowerCamelCase__ : List[str] = 'up'
@property
def lowerCAmelCase__ ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCAmelCase )
def lowerCAmelCase__ ( self ):
a_ , a_ = super().prepare_init_args_and_inputs_for_common()
a_ = 32
return init_dict, inputs_dict
def lowerCAmelCase__ ( self ):
a_ = [-0.14_03, -0.35_15, -0.04_20, -0.14_25, 0.31_67, 0.50_94, -0.21_81, 0.59_31, 0.55_82]
super().test_output(UpperCAmelCase )
class a_ ( UpperCamelCase__ , unittest.TestCase ):
lowerCamelCase__ : Optional[Any] = SimpleCrossAttnUpBlockaD # noqa F405
lowerCamelCase__ : Any = 'up'
@property
def lowerCAmelCase__ ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCAmelCase , include_encoder_hidden_states=UpperCAmelCase )
def lowerCAmelCase__ ( self ):
a_ , a_ = super().prepare_init_args_and_inputs_for_common()
a_ = 32
return init_dict, inputs_dict
def lowerCAmelCase__ ( self ):
a_ = [0.26_45, 0.14_80, 0.09_09, 0.80_44, -0.97_58, -0.90_83, 0.09_94, -1.14_53, -0.74_02]
super().test_output(UpperCAmelCase )
class a_ ( UpperCamelCase__ , unittest.TestCase ):
lowerCamelCase__ : int = AttnUpBlockaD # noqa F405
lowerCamelCase__ : Union[str, Any] = 'up'
@property
def lowerCAmelCase__ ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCAmelCase )
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def lowerCAmelCase__ ( self ):
a_ = [0.09_79, 0.13_26, 0.00_21, 0.06_59, 0.22_49, 0.00_59, 0.11_32, 0.59_52, 0.10_33]
super().test_output(UpperCAmelCase )
class a_ ( UpperCamelCase__ , unittest.TestCase ):
lowerCamelCase__ : Tuple = SkipUpBlockaD # noqa F405
lowerCamelCase__ : List[Any] = 'up'
@property
def lowerCAmelCase__ ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCAmelCase )
def lowerCAmelCase__ ( self ):
a_ = [-0.08_93, -0.12_34, -0.15_06, -0.03_32, 0.01_23, -0.02_11, 0.05_66, 0.01_43, 0.03_62]
super().test_output(UpperCAmelCase )
class a_ ( UpperCamelCase__ , unittest.TestCase ):
lowerCamelCase__ : str = AttnSkipUpBlockaD # noqa F405
lowerCamelCase__ : Any = 'up'
@property
def lowerCAmelCase__ ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCAmelCase )
def lowerCAmelCase__ ( self ):
a_ = [0.03_61, 0.06_17, 0.27_87, -0.03_50, 0.03_42, 0.34_21, -0.08_43, 0.09_13, 0.30_15]
super().test_output(UpperCAmelCase )
class a_ ( UpperCamelCase__ , unittest.TestCase ):
lowerCamelCase__ : str = UpDecoderBlockaD # noqa F405
lowerCamelCase__ : str = 'up'
@property
def lowerCAmelCase__ ( self ):
return super().get_dummy_input(include_temb=UpperCAmelCase )
def lowerCAmelCase__ ( self ):
a_ = {"""in_channels""": 32, """out_channels""": 32}
a_ = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase__ ( self ):
a_ = [0.44_04, 0.19_98, -0.98_86, -0.33_20, -0.31_28, -0.70_34, -0.69_55, -0.23_38, -0.31_37]
super().test_output(UpperCAmelCase )
class a_ ( UpperCamelCase__ , unittest.TestCase ):
lowerCamelCase__ : int = AttnUpDecoderBlockaD # noqa F405
lowerCamelCase__ : Optional[Any] = 'up'
@property
def lowerCAmelCase__ ( self ):
return super().get_dummy_input(include_temb=UpperCAmelCase )
def lowerCAmelCase__ ( self ):
a_ = {"""in_channels""": 32, """out_channels""": 32}
a_ = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase__ ( self ):
a_ = [0.67_38, 0.44_91, 0.10_55, 1.07_10, 0.73_16, 0.33_39, 0.33_52, 0.10_23, 0.35_68]
super().test_output(UpperCAmelCase )
| 511 |
'''simple docstring'''
def UpperCamelCase_ ( A__ , A__ ):
while b:
a_ , a_ = b, a % b
return a
def UpperCamelCase_ ( A__ , A__ ):
return a if b == 0 else euclidean_gcd_recursive(A__ , a % b )
def UpperCamelCase_ ( ):
print(F'''euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}''' )
print(F'''euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}''' )
print(F'''euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}''' )
print(F'''euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}''' )
print(F'''euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}''' )
print(F'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}''' )
print(F'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}''' )
print(F'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}''' )
if __name__ == "__main__":
main()
| 511 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
snake_case : Dict = {
'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Optional[Any] = ['ConvNextFeatureExtractor']
snake_case : Any = ['ConvNextImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : List[str] = [
'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvNextForImageClassification',
'ConvNextModel',
'ConvNextPreTrainedModel',
'ConvNextBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : int = [
'TFConvNextForImageClassification',
'TFConvNextModel',
'TFConvNextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
snake_case : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 605 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
snake_case : Tuple = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ):
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(UpperCAmelCase__ )
def SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ):
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_terminal_summary_main
_SCREAMING_SNAKE_CASE = terminalreporter.config.getoption('--make-reports' )
if make_reports:
pytest_terminal_summary_main(UpperCAmelCase__ ,id=UpperCAmelCase__ )
| 605 | 1 |
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
__A = logging.getLogger(__name__)
def lowercase_ ( _lowerCamelCase: torch.nn.Module , _lowerCamelCase: BnbQuantizationConfig , _lowerCamelCase: Union[str, os.PathLike] = None , _lowerCamelCase: Optional[Dict[str, Union[int, str, torch.device]]] = None , _lowerCamelCase: Optional[List[str]] = None , _lowerCamelCase: Optional[Dict[Union[int, str], Union[int, str]]] = None , _lowerCamelCase: Optional[Union[str, os.PathLike]] = None , _lowerCamelCase: bool = False , ) -> List[str]:
'''simple docstring'''
__lowerCamelCase : int = bnb_quantization_config.load_in_abit
__lowerCamelCase : Any = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
" make sure you have the latest version of `bitsandbytes` installed." )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
"make sure you have the latest version of `bitsandbytes` installed." )
__lowerCamelCase : int = []
# custom device map
if isinstance(_lowerCamelCase , _lowerCamelCase ) and len(device_map.keys() ) > 1:
__lowerCamelCase : Optional[Any] = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
__lowerCamelCase : Optional[Any] = get_keys_to_not_convert(_lowerCamelCase )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(_lowerCamelCase )
__lowerCamelCase : Optional[int] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
__lowerCamelCase : List[str] = []
__lowerCamelCase : Union[str, Any] = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(_lowerCamelCase )
# compatibility with peft
__lowerCamelCase : Tuple = load_in_abit
__lowerCamelCase : str = load_in_abit
__lowerCamelCase : Optional[Any] = get_parameter_device(_lowerCamelCase )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"It is not recommended to quantize a loaded model. "
"The model should be instantiated under the `init_empty_weights` context manager." )
__lowerCamelCase : Optional[int] = replace_with_bnb_layers(_lowerCamelCase , _lowerCamelCase , modules_to_not_convert=_lowerCamelCase )
# convert param to the right dtype
__lowerCamelCase : str = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
__lowerCamelCase : int = name.replace(".weight" , "" ).replace(".bias" , "" )
__lowerCamelCase : Tuple = getattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(_lowerCamelCase ):
param.to(_lowerCamelCase )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization." )
logger.info(
F"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
"We move the model to cuda." )
return model
elif weights_location is None:
raise RuntimeError(
F"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
else:
with init_empty_weights():
__lowerCamelCase : Union[str, Any] = replace_with_bnb_layers(
_lowerCamelCase , _lowerCamelCase , modules_to_not_convert=_lowerCamelCase )
__lowerCamelCase : Optional[Any] = get_quantized_model_device_map(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , max_memory=_lowerCamelCase , no_split_module_classes=_lowerCamelCase , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
__lowerCamelCase : Any = True
__lowerCamelCase : List[Any] = any(x in list(device_map.values() ) for x in ["cpu", "disk"] )
load_checkpoint_in_model(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , dtype=bnb_quantization_config.torch_dtype , offload_folder=_lowerCamelCase , offload_state_dict=_lowerCamelCase , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(_lowerCamelCase , device_map=_lowerCamelCase , offload_dir=_lowerCamelCase )
def lowercase_ ( _lowerCamelCase: Optional[int] , _lowerCamelCase: Optional[Any] , _lowerCamelCase: int=None , _lowerCamelCase: int=None , _lowerCamelCase: Any=None ) -> Any:
'''simple docstring'''
if device_map is None:
if torch.cuda.is_available():
__lowerCamelCase : Any = {"": torch.cuda.current_device()}
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization." )
logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`." )
if isinstance(_lowerCamelCase , _lowerCamelCase ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
"'sequential'." )
__lowerCamelCase : int = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
__lowerCamelCase : List[Any] = {}
__lowerCamelCase : Any = special_dtypes
__lowerCamelCase : Optional[Any] = no_split_module_classes
__lowerCamelCase : int = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
__lowerCamelCase : List[str] = get_balanced_memory(
_lowerCamelCase , low_zero=(device_map == "balanced_low_0") , max_memory=_lowerCamelCase , **_lowerCamelCase , )
__lowerCamelCase : Dict = max_memory
__lowerCamelCase : str = infer_auto_device_map(_lowerCamelCase , **_lowerCamelCase )
if isinstance(_lowerCamelCase , _lowerCamelCase ):
# check if don't have any quantized module on the cpu
__lowerCamelCase : Any = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
__lowerCamelCase : Optional[Any] = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n " )
else:
logger.info(
"Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit" )
del device_map_without_some_modules
return device_map
def lowercase_ ( _lowerCamelCase: Dict , _lowerCamelCase: Tuple , _lowerCamelCase: List[str]=None , _lowerCamelCase: Optional[Any]=None ) -> List[Any]:
'''simple docstring'''
if modules_to_not_convert is None:
__lowerCamelCase : Dict = []
__lowerCamelCase , __lowerCamelCase : Dict = _replace_with_bnb_layers(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if not has_been_replaced:
logger.warning(
"You are loading your model in 8bit or 4bit but no linear modules were found in your model."
" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug." )
return model
def lowercase_ ( _lowerCamelCase: Union[str, Any] , _lowerCamelCase: int , _lowerCamelCase: str=None , _lowerCamelCase: List[Any]=None , ) -> Tuple:
'''simple docstring'''
__lowerCamelCase : Optional[Any] = False
for name, module in model.named_children():
if current_key_name is None:
__lowerCamelCase : Union[str, Any] = []
current_key_name.append(_lowerCamelCase )
if isinstance(_lowerCamelCase , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
__lowerCamelCase : Union[str, Any] = ".".join(_lowerCamelCase )
__lowerCamelCase : Dict = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
__lowerCamelCase : Optional[int] = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
__lowerCamelCase : Tuple = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_lowerCamelCase , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
__lowerCamelCase : Optional[Any] = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError("load_in_8bit and load_in_4bit can't be both False" )
__lowerCamelCase : Dict = module.weight.data
if module.bias is not None:
__lowerCamelCase : Dict = module.bias.data
bnb_module.requires_grad_(_lowerCamelCase )
setattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
__lowerCamelCase : Optional[Any] = True
if len(list(module.children() ) ) > 0:
__lowerCamelCase , __lowerCamelCase : Any = _replace_with_bnb_layers(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
__lowerCamelCase : Any = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def lowercase_ ( _lowerCamelCase: List[Any] ) -> Any:
'''simple docstring'''
with init_empty_weights():
__lowerCamelCase : Dict = deepcopy(_lowerCamelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
__lowerCamelCase : Optional[Any] = find_tied_parameters(_lowerCamelCase )
# For compatibility with Accelerate < 0.18
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__lowerCamelCase : int = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
__lowerCamelCase : List[str] = sum(_lowerCamelCase , [] )
__lowerCamelCase : List[Any] = len(_lowerCamelCase ) > 0
# Check if it is a base model
__lowerCamelCase : List[str] = False
if hasattr(_lowerCamelCase , "base_model_prefix" ):
__lowerCamelCase : str = not hasattr(_lowerCamelCase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
__lowerCamelCase : List[Any] = list(model.named_children() )
__lowerCamelCase : Tuple = [list_modules[-1][0]]
# add last module together with tied weights
__lowerCamelCase : Union[str, Any] = set(_lowerCamelCase ) - set(_lowerCamelCase )
__lowerCamelCase : Optional[int] = list(set(_lowerCamelCase ) ) + list(_lowerCamelCase )
# remove ".weight" from the keys
__lowerCamelCase : Dict = [".weight", ".bias"]
__lowerCamelCase : Optional[int] = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
__lowerCamelCase : Optional[Any] = name.replace(_lowerCamelCase , "" )
filtered_module_names.append(_lowerCamelCase )
return filtered_module_names
def lowercase_ ( _lowerCamelCase: Tuple ) -> int:
'''simple docstring'''
for m in model.modules():
if isinstance(_lowerCamelCase , bnb.nn.Linearabit ):
return True
return False
def lowercase_ ( _lowerCamelCase: nn.Module ) -> List[str]:
'''simple docstring'''
return next(parameter.parameters() ).device
def lowercase_ ( _lowerCamelCase: Union[str, Any] , _lowerCamelCase: List[str] , _lowerCamelCase: str , _lowerCamelCase: Optional[int] , _lowerCamelCase: List[Any] , _lowerCamelCase: Dict , _lowerCamelCase: Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
if fpaa_statistics is None:
set_module_tensor_to_device(_lowerCamelCase , _lowerCamelCase , 0 , dtype=_lowerCamelCase , value=_lowerCamelCase )
__lowerCamelCase : Union[str, Any] = param_name
__lowerCamelCase : Union[str, Any] = model
if "." in tensor_name:
__lowerCamelCase : Dict = tensor_name.split("." )
for split in splits[:-1]:
__lowerCamelCase : Union[str, Any] = getattr(_lowerCamelCase , _lowerCamelCase )
if new_module is None:
raise ValueError(F"""{module} has no attribute {split}.""" )
__lowerCamelCase : List[Any] = new_module
__lowerCamelCase : Optional[Any] = splits[-1]
# offload weights
__lowerCamelCase : str = False
offload_weight(module._parameters[tensor_name] , _lowerCamelCase , _lowerCamelCase , index=_lowerCamelCase )
if hasattr(module._parameters[tensor_name] , "SCB" ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("weight" , "SCB" ) , _lowerCamelCase , index=_lowerCamelCase , )
else:
offload_weight(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , index=_lowerCamelCase )
offload_weight(_lowerCamelCase , param_name.replace("weight" , "SCB" ) , _lowerCamelCase , index=_lowerCamelCase )
set_module_tensor_to_device(_lowerCamelCase , _lowerCamelCase , "meta" , dtype=_lowerCamelCase , value=torch.empty(*param.size() ) ) | 366 | """simple docstring"""
import os
def lowercase_ ( ) -> List[str]:
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = os.path.dirname(os.path.realpath(_lowerCamelCase ) )
__lowerCamelCase : int = os.path.join(_lowerCamelCase , "triangle.txt" )
with open(_lowerCamelCase ) as f:
__lowerCamelCase : List[str] = f.readlines()
__lowerCamelCase : List[str] = []
for line in triangle:
__lowerCamelCase : Optional[int] = []
for number in line.strip().split(" " ):
numbers_from_line.append(int(_lowerCamelCase ) )
a.append(_lowerCamelCase )
for i in range(1 , len(_lowerCamelCase ) ):
for j in range(len(a[i] ) ):
__lowerCamelCase : int = a[i - 1][j] if j != len(a[i - 1] ) else 0
__lowerCamelCase : Union[str, Any] = a[i - 1][j - 1] if j > 0 else 0
a[i][j] += max(_lowerCamelCase , _lowerCamelCase )
return max(a[-1] )
if __name__ == "__main__":
print(solution()) | 366 | 1 |
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE=0.01 , _SCREAMING_SNAKE_CASE=1000 ) -> Tuple:
snake_case_ : int = p_stop
snake_case_ : List[Any] = max_length
def __iter__( self ) -> str:
snake_case_ : Union[str, Any] = 0
snake_case_ : Any = False
while not stop and count < self.max_length:
yield count
count += 1
snake_case_ : List[str] = random.random() < self.p_stop
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True ) -> str:
snake_case_ : Optional[int] = [
BatchSamplerShard(_SCREAMING_SNAKE_CASE , 2 , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE )
for i in range(2 )
]
snake_case_ : Optional[int] = [list(_SCREAMING_SNAKE_CASE ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(_SCREAMING_SNAKE_CASE ) for shard in batch_sampler_shards] , [len(_SCREAMING_SNAKE_CASE ) for e in expected] )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> Union[str, Any]:
# Check the shards when the dataset is a round multiple of total batch size.
snake_case_ : Dict = BatchSampler(range(24 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE )
snake_case_ : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ : Union[str, Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE )
# Expected shouldn't change
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
snake_case_ : Optional[Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE )
snake_case_ : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ : Any = BatchSampler(range(21 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
snake_case_ : Optional[int] = BatchSampler(range(22 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ : List[Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE )
snake_case_ : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
snake_case_ : List[Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ : Optional[Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE )
snake_case_ : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is very small.
snake_case_ : Dict = BatchSampler(range(2 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[int] = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ : Dict = BatchSampler(range(2 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE )
snake_case_ : Any = [[], []]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> Optional[int]:
# Check the shards when the dataset is a round multiple of batch size.
snake_case_ : List[Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE )
snake_case_ : str = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE )
snake_case_ : Any = BatchSampler(range(24 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE )
# Expected shouldn't change
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is not a round multiple of batch size.
snake_case_ : List[str] = BatchSampler(range(22 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE )
snake_case_ : Tuple = BatchSampler(range(22 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE )
snake_case_ : Dict = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
snake_case_ : Any = BatchSampler(range(21 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE )
snake_case_ : List[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE )
snake_case_ : int = BatchSampler(range(21 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE )
snake_case_ : Dict = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is very small.
snake_case_ : str = BatchSampler(range(2 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[int] = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[int] = BatchSampler(range(2 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE )
snake_case_ : Tuple = [[], []]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> Optional[int]:
# Check the shards when the dataset is a round multiple of total batch size.
snake_case_ : str = BatchSampler(range(24 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE )
snake_case_ : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE )
snake_case_ : Dict = BatchSampler(range(24 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE )
# Expected shouldn't change
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
snake_case_ : List[Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE )
snake_case_ : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE )
snake_case_ : List[Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE )
snake_case_ : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
snake_case_ : Union[str, Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE )
snake_case_ : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE )
snake_case_ : Any = BatchSampler(range(22 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE )
snake_case_ : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
snake_case_ : Optional[int] = BatchSampler(range(20 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE )
snake_case_ : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[int] = BatchSampler(range(20 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE )
snake_case_ : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is very small.
snake_case_ : Optional[int] = BatchSampler(range(2 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE )
snake_case_ : int = [[[0, 1]], []]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE )
snake_case_ : List[Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[int] = [[], []]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> Dict:
# Check the shards when the dataset is a round multiple of batch size.
snake_case_ : Optional[int] = BatchSampler(range(24 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE )
snake_case_ : str = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE )
# Expected shouldn't change
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is not a round multiple of batch size.
snake_case_ : Union[str, Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE )
snake_case_ : Dict = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE )
snake_case_ : List[str] = BatchSampler(range(22 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE )
snake_case_ : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
snake_case_ : Tuple = BatchSampler(range(21 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE )
snake_case_ : Dict = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE )
snake_case_ : List[Any] = BatchSampler(range(21 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE )
snake_case_ : List[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is very small.
snake_case_ : List[str] = BatchSampler(range(2 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE )
snake_case_ : Any = [[[0, 1]], []]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[int] = BatchSampler(range(2 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE )
snake_case_ : int = [[], []]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> Optional[Any]:
snake_case_ : List[Any] = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
snake_case_ : Dict = [BatchSamplerShard(_SCREAMING_SNAKE_CASE , 2 , _SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=False ) -> Optional[Any]:
random.seed(_SCREAMING_SNAKE_CASE )
snake_case_ : Union[str, Any] = list(_SCREAMING_SNAKE_CASE )
snake_case_ : Union[str, Any] = [
IterableDatasetShard(
_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , drop_last=_SCREAMING_SNAKE_CASE , num_processes=_SCREAMING_SNAKE_CASE , process_index=_SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE , )
for i in range(_SCREAMING_SNAKE_CASE )
]
snake_case_ : Tuple = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(_SCREAMING_SNAKE_CASE )
iterable_dataset_lists.append(list(_SCREAMING_SNAKE_CASE ) )
snake_case_ : Optional[int] = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
snake_case_ : Tuple = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) )
self.assertTrue(len(_SCREAMING_SNAKE_CASE ) % shard_batch_size == 0 )
snake_case_ : Optional[int] = []
for idx in range(0 , len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(_SCREAMING_SNAKE_CASE ) < len(_SCREAMING_SNAKE_CASE ):
reference += reference
self.assertListEqual(_SCREAMING_SNAKE_CASE , reference[: len(_SCREAMING_SNAKE_CASE )] )
def _lowerCAmelCase ( self ) -> List[Any]:
snake_case_ : List[str] = 42
snake_case_ : Dict = RandomIterableDataset()
self.check_iterable_dataset_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE )
self.check_iterable_dataset_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE )
self.check_iterable_dataset_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE )
self.check_iterable_dataset_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE )
# Edge case with a very small dataset
snake_case_ : Tuple = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE )
self.check_iterable_dataset_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE )
self.check_iterable_dataset_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE )
self.check_iterable_dataset_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> Tuple:
snake_case_ : Union[str, Any] = BatchSampler(range(16 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE )
snake_case_ : List[str] = SkipBatchSampler(_SCREAMING_SNAKE_CASE , 2 )
self.assertListEqual(list(_SCREAMING_SNAKE_CASE ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def _lowerCAmelCase ( self ) -> Union[str, Any]:
snake_case_ : List[str] = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def _lowerCAmelCase ( self ) -> Tuple:
snake_case_ : Any = DataLoader(list(range(16 ) ) , batch_size=4 )
snake_case_ : Optional[int] = skip_first_batches(_SCREAMING_SNAKE_CASE , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def _lowerCAmelCase ( self ) -> Optional[int]:
snake_case_ : Optional[Any] = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(_SCREAMING_SNAKE_CASE ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(_SCREAMING_SNAKE_CASE ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def _lowerCAmelCase ( self ) -> Optional[int]:
Accelerator()
snake_case_ : Optional[Any] = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(_SCREAMING_SNAKE_CASE ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(_SCREAMING_SNAKE_CASE ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
| 568 |
import comet # From: unbabel-comet
import torch
import datasets
lowercase : List[Any] = datasets.logging.get_logger(__name__)
lowercase : List[str] = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''
lowercase : Dict = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''
lowercase : Tuple = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://unbabel.github.io/COMET/html/index.html" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"sources": datasets.Value("string" , id="sequence" ),
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/Unbabel/COMET"] , reference_urls=[
"https://github.com/Unbabel/COMET",
"https://www.aclweb.org/anthology/2020.emnlp-main.213/",
"http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
] , )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> Any:
if self.config_name == "default":
snake_case_ : Union[str, Any] = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da" ) )
else:
snake_case_ : int = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False ) -> Dict:
if gpus is None:
snake_case_ : Union[str, Any] = 1 if torch.cuda.is_available() else 0
snake_case_ : str = {"src": sources, "mt": predictions, "ref": references}
snake_case_ : Dict = [dict(zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) for t in zip(*data.values() )]
snake_case_ , snake_case_ : Union[str, Any] = self.scorer.predict(_SCREAMING_SNAKE_CASE , gpus=_SCREAMING_SNAKE_CASE , progress_bar=_SCREAMING_SNAKE_CASE )
return {"mean_score": mean_score, "scores": scores}
| 568 | 1 |
import os
import sys
lowerCAmelCase = os.path.join(os.path.dirname(__file__), """src""")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
lowerCAmelCase = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__ )
def __SCREAMING_SNAKE_CASE ( *lowercase_ , **lowercase_ ) -> str:
'''simple docstring'''
return AutoConfig.from_pretrained(*__A , **__A )
@add_start_docstrings(AutoTokenizer.__doc__ )
def __SCREAMING_SNAKE_CASE ( *lowercase_ , **lowercase_ ) -> Union[str, Any]:
'''simple docstring'''
return AutoTokenizer.from_pretrained(*__A , **__A )
@add_start_docstrings(AutoModel.__doc__ )
def __SCREAMING_SNAKE_CASE ( *lowercase_ , **lowercase_ ) -> Tuple:
'''simple docstring'''
return AutoModel.from_pretrained(*__A , **__A )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def __SCREAMING_SNAKE_CASE ( *lowercase_ , **lowercase_ ) -> Dict:
'''simple docstring'''
return AutoModelForCausalLM.from_pretrained(*__A , **__A )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def __SCREAMING_SNAKE_CASE ( *lowercase_ , **lowercase_ ) -> List[Any]:
'''simple docstring'''
return AutoModelForMaskedLM.from_pretrained(*__A , **__A )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def __SCREAMING_SNAKE_CASE ( *lowercase_ , **lowercase_ ) -> Dict:
'''simple docstring'''
return AutoModelForSequenceClassification.from_pretrained(*__A , **__A )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def __SCREAMING_SNAKE_CASE ( *lowercase_ , **lowercase_ ) -> List[Any]:
'''simple docstring'''
return AutoModelForQuestionAnswering.from_pretrained(*__A , **__A )
| 715 |
lowerCAmelCase = 256
# Modulus to hash a string
lowerCAmelCase = 1_000_003
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> bool:
'''simple docstring'''
__UpperCAmelCase : List[str] = len(lowercase_ )
__UpperCAmelCase : Tuple = len(lowercase_ )
if p_len > t_len:
return False
__UpperCAmelCase : Any = 0
__UpperCAmelCase : List[Any] = 0
__UpperCAmelCase : List[Any] = 1
# Calculating the hash of pattern and substring of text
for i in range(lowercase_ ):
__UpperCAmelCase : List[str] = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
__UpperCAmelCase : List[Any] = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
__UpperCAmelCase : Any = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
__UpperCAmelCase : int = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def __SCREAMING_SNAKE_CASE ( ) -> None:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = '''abc1abc12'''
__UpperCAmelCase : List[str] = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
__UpperCAmelCase : Any = '''alskfjaldsk23adsfabcabc'''
assert rabin_karp(lowercase_ , lowercase_ ) and not rabin_karp(lowercase_ , lowercase_ )
# Test 2)
__UpperCAmelCase : Union[str, Any] = '''ABABX'''
__UpperCAmelCase : List[Any] = '''ABABZABABYABABX'''
assert rabin_karp(lowercase_ , lowercase_ )
# Test 3)
__UpperCAmelCase : str = '''AAAB'''
__UpperCAmelCase : List[Any] = '''ABAAAAAB'''
assert rabin_karp(lowercase_ , lowercase_ )
# Test 4)
__UpperCAmelCase : Optional[Any] = '''abcdabcy'''
__UpperCAmelCase : Any = '''abcxabcdabxabcdabcdabcy'''
assert rabin_karp(lowercase_ , lowercase_ )
# Test 5)
__UpperCAmelCase : Any = '''Lü'''
__UpperCAmelCase : Optional[int] = '''Lüsai'''
assert rabin_karp(lowercase_ , lowercase_ )
__UpperCAmelCase : List[Any] = '''Lue'''
assert not rabin_karp(lowercase_ , lowercase_ )
print('''Success.''' )
if __name__ == "__main__":
test_rabin_karp()
| 675 | 0 |
"""simple docstring"""
def _snake_case ( UpperCamelCase : int ):
if len(lowercase__ ) < 2:
return collection
def circle_sort_util(UpperCamelCase : str , UpperCamelCase : Optional[int] , UpperCamelCase : List[str] ) -> bool:
UpperCAmelCase : Any = False
if low == high:
return swapped
UpperCAmelCase : int = low
UpperCAmelCase : Optional[Any] = high
while left < right:
if collection[left] > collection[right]:
UpperCAmelCase : List[Any] = (
collection[right],
collection[left],
)
UpperCAmelCase : str = True
left += 1
right -= 1
if left == right and collection[left] > collection[right + 1]:
UpperCAmelCase : List[str] = (
collection[right + 1],
collection[left],
)
UpperCAmelCase : Optional[int] = True
UpperCAmelCase : Dict = low + int((high - low) / 2 )
UpperCAmelCase : Union[str, Any] = circle_sort_util(lowercase__ , lowercase__ , lowercase__ )
UpperCAmelCase : Optional[int] = circle_sort_util(lowercase__ , mid + 1 , lowercase__ )
return swapped or left_swap or right_swap
UpperCAmelCase : Union[str, Any] = True
while is_not_sorted is True:
UpperCAmelCase : int = circle_sort_util(lowercase__ , 0 , len(lowercase__ ) - 1 )
return collection
if __name__ == "__main__":
A: List[str] = input("Enter numbers separated by a comma:\n").strip()
A: List[str] = [int(item) for item in user_input.split(",")]
print(circle_sort(unsorted))
| 160 |
'''simple docstring'''
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Tuple , snake_case__ : List[str] , snake_case__ : Union[str, Any]=2 , snake_case__ : int=56 , snake_case__ : Any=True , snake_case__ : Union[str, Any]=True , snake_case__ : Optional[int]=True , snake_case__ : Any=True , snake_case__ : Tuple=99 , snake_case__ : Any=32 , snake_case__ : List[Any]=2 , snake_case__ : Optional[Any]=2 , snake_case__ : Any=7 , snake_case__ : Union[str, Any]="gelu_new" , snake_case__ : List[Any]=0.1 , snake_case__ : Optional[Any]=0.1 , snake_case__ : Optional[int]=5_12 , snake_case__ : Optional[int]=16 , snake_case__ : int=2 , snake_case__ : List[Any]=0.02 , snake_case__ : int=4 , snake_case__ : Optional[Any]="block_sparse" , snake_case__ : Union[str, Any]=True , snake_case__ : List[Any]=False , snake_case__ : Optional[Any]=2 , snake_case__ : Optional[Any]=3 , ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = parent
UpperCAmelCase__ : Dict = batch_size
UpperCAmelCase__ : Tuple = seq_length
UpperCAmelCase__ : Optional[int] = is_training
UpperCAmelCase__ : Optional[int] = use_attention_mask
UpperCAmelCase__ : List[Any] = use_token_type_ids
UpperCAmelCase__ : Dict = use_labels
UpperCAmelCase__ : Tuple = vocab_size
UpperCAmelCase__ : str = hidden_size
UpperCAmelCase__ : List[str] = num_hidden_layers
UpperCAmelCase__ : Union[str, Any] = num_attention_heads
UpperCAmelCase__ : Optional[int] = intermediate_size
UpperCAmelCase__ : Optional[int] = hidden_act
UpperCAmelCase__ : Union[str, Any] = hidden_dropout_prob
UpperCAmelCase__ : str = attention_probs_dropout_prob
UpperCAmelCase__ : str = max_position_embeddings
UpperCAmelCase__ : Any = type_vocab_size
UpperCAmelCase__ : Dict = type_sequence_label_size
UpperCAmelCase__ : Dict = initializer_range
UpperCAmelCase__ : Any = num_choices
UpperCAmelCase__ : List[str] = rescale_embeddings
UpperCAmelCase__ : Union[str, Any] = attention_type
UpperCAmelCase__ : Union[str, Any] = use_bias
UpperCAmelCase__ : Union[str, Any] = block_size
UpperCAmelCase__ : Any = num_random_blocks
def UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ : Optional[Any] = None
if self.use_attention_mask:
UpperCAmelCase__ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ : Union[str, Any] = None
if self.use_token_type_ids:
UpperCAmelCase__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase__ : Union[str, Any] = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def UpperCamelCase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : int = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Tuple = config_and_inputs
UpperCAmelCase__ : str = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_flax
class UpperCAmelCase_ ( A , unittest.TestCase ):
'''simple docstring'''
lowercase_ : Any = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
lowercase_ : Optional[Any] = False
lowercase_ : str = False
def UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def UpperCamelCase ( self : Tuple ):
'''simple docstring'''
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def UpperCamelCase ( self : Tuple ):
'''simple docstring'''
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def UpperCamelCase ( self : Dict ):
'''simple docstring'''
super().test_hidden_states_output()
@slow
def UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
UpperCAmelCase__ : List[Any] = model_class_name.from_pretrained("google/bigbird-roberta-base" )
self.assertIsNotNone(snake_case__ )
def UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def UpperCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase__ : Union[str, Any] = self._prepare_for_class(snake_case__ , snake_case__ )
UpperCAmelCase__ : List[Any] = model_class(snake_case__ )
@jax.jit
def model_jitted(snake_case__ : int , snake_case__ : Optional[int]=None , **snake_case__ : Optional[Any] ):
return model(input_ids=snake_case__ , attention_mask=snake_case__ , **snake_case__ )
with self.subTest("JIT Enabled" ):
UpperCAmelCase__ : List[Any] = model_jitted(**snake_case__ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
UpperCAmelCase__ : Tuple = model_jitted(**snake_case__ ).to_tuple()
self.assertEqual(len(snake_case__ ) , len(snake_case__ ) )
for jitted_output, output in zip(snake_case__ , snake_case__ ):
self.assertEqual(jitted_output.shape , output.shape )
def UpperCamelCase ( self : List[str] , snake_case__ : int , snake_case__ : Dict , snake_case__ : Any , snake_case__ : Union[str, Any]=1e-5 , snake_case__ : Any="outputs" , snake_case__ : Optional[Any]=None ):
'''simple docstring'''
if name.startswith("outputs.attentions" ):
return
else:
super().check_pt_flax_outputs(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
| 199 | 0 |
'''simple docstring'''
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self : int ):
debug_launcher(test_script.main )
def __snake_case ( self : List[Any] ):
debug_launcher(test_ops.main )
| 570 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
a__ : Optional[Any] = logging.get_logger(__name__)
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
def __init__( self : Dict , *a__ : Optional[int] , **a__ : List[Any] ):
warnings.warn(
'''The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use LayoutLMv2ImageProcessor instead.''' , a__ , )
super().__init__(*a__ , **a__ )
| 570 | 1 |
import functools
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ) -> int:
# Validation
if not isinstance(__snake_case , __snake_case ) or not all(isinstance(__snake_case , __snake_case ) for day in days ):
raise ValueError("""The parameter days should be a list of integers""" )
if len(__snake_case ) != 3 or not all(isinstance(__snake_case , __snake_case ) for cost in costs ):
raise ValueError("""The parameter costs should be a list of three integers""" )
if len(__snake_case ) == 0:
return 0
if min(__snake_case ) <= 0:
raise ValueError("""All days elements should be greater than 0""" )
if max(__snake_case ) >= 3_6_6:
raise ValueError("""All days elements should be less than 366""" )
_UpperCAmelCase = set(__snake_case )
@functools.cache
def dynamic_programming(__snake_case ) -> int:
if index > 3_6_5:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 3_0 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod() | 108 |
def UpperCAmelCase__ (UpperCamelCase_ = 10_00 ):
"""simple docstring"""
return sum(2 * a * ((a - 1) // 2) for a in range(3 ,n + 1 ) )
if __name__ == "__main__":
print(solution())
| 550 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCamelCase ( lowercase , unittest.TestCase ):
UpperCAmelCase : Any = KandinskyImgaImgPipeline
UpperCAmelCase : Optional[Any] = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image"""]
UpperCAmelCase : str = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
]
UpperCAmelCase : Union[str, Any] = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
UpperCAmelCase : Union[str, Any] = False
@property
def _lowercase (self : Optional[int]) -> List[Any]:
return 32
@property
def _lowercase (self : List[Any]) -> Optional[int]:
return 32
@property
def _lowercase (self : Any) -> Optional[Any]:
return self.time_input_dim
@property
def _lowercase (self : Dict) -> int:
return self.time_input_dim * 4
@property
def _lowercase (self : int) -> int:
return 1_00
@property
def _lowercase (self : Optional[Any]) -> int:
__snake_case : Optional[int] = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base')
return tokenizer
@property
def _lowercase (self : Any) -> str:
torch.manual_seed(0)
__snake_case : Union[str, Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
__snake_case : Dict = MultilingualCLIP(__UpperCamelCase)
__snake_case : int = text_encoder.eval()
return text_encoder
@property
def _lowercase (self : int) -> int:
torch.manual_seed(0)
__snake_case : int = {
'in_channels': 4,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
__snake_case : str = UNetaDConditionModel(**__UpperCamelCase)
return model
@property
def _lowercase (self : Any) -> str:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _lowercase (self : Optional[int]) -> Optional[Any]:
torch.manual_seed(0)
__snake_case : Tuple = VQModel(**self.dummy_movq_kwargs)
return model
def _lowercase (self : str) -> int:
__snake_case : str = self.dummy_text_encoder
__snake_case : Optional[int] = self.dummy_tokenizer
__snake_case : int = self.dummy_unet
__snake_case : Tuple = self.dummy_movq
__snake_case : str = {
'num_train_timesteps': 10_00,
'beta_schedule': 'linear',
'beta_start': 0.00_085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
__snake_case : List[str] = DDIMScheduler(**__UpperCamelCase)
__snake_case : Tuple = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def _lowercase (self : Dict , _A : Dict , _A : str=0) -> Union[str, Any]:
__snake_case : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__UpperCamelCase)).to(__UpperCamelCase)
__snake_case : Optional[int] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1)).to(__UpperCamelCase)
# create init_image
__snake_case : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(__UpperCamelCase)).to(__UpperCamelCase)
__snake_case : Tuple = image.cpu().permute(0 , 2 , 3 , 1)[0]
__snake_case : Union[str, Any] = Image.fromarray(np.uinta(__UpperCamelCase)).convert('RGB').resize((2_56, 2_56))
if str(__UpperCamelCase).startswith('mps'):
__snake_case : List[Any] = torch.manual_seed(__UpperCamelCase)
else:
__snake_case : Dict = torch.Generator(device=__UpperCamelCase).manual_seed(__UpperCamelCase)
__snake_case : str = {
'prompt': 'horse',
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 10,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def _lowercase (self : int) -> Union[str, Any]:
__snake_case : Any = 'cpu'
__snake_case : Optional[int] = self.get_dummy_components()
__snake_case : Optional[Any] = self.pipeline_class(**__UpperCamelCase)
__snake_case : Any = pipe.to(__UpperCamelCase)
pipe.set_progress_bar_config(disable=__UpperCamelCase)
__snake_case : Optional[int] = pipe(**self.get_dummy_inputs(__UpperCamelCase))
__snake_case : int = output.images
__snake_case : Union[str, Any] = pipe(
**self.get_dummy_inputs(__UpperCamelCase) , return_dict=__UpperCamelCase , )[0]
__snake_case : Union[str, Any] = image[0, -3:, -3:, -1]
__snake_case : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__snake_case : Union[str, Any] = np.array(
[0.61_474_943, 0.6_073_539, 0.43_308_544, 0.5_928_269, 0.47_493_595, 0.46_755_973, 0.4_613_838, 0.45_368_797, 0.50_119_233])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
def _lowercase (self : Union[str, Any]) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase (self : Tuple) -> Tuple:
__snake_case : List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_img2img_frog.npy')
__snake_case : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png')
__snake_case : Union[str, Any] = 'A red cartoon frog, 4k'
__snake_case : str = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.floataa)
pipe_prior.to(__UpperCamelCase)
__snake_case : Union[str, Any] = KandinskyImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1' , torch_dtype=torch.floataa)
__snake_case : Tuple = pipeline.to(__UpperCamelCase)
pipeline.set_progress_bar_config(disable=__UpperCamelCase)
__snake_case : int = torch.Generator(device='cpu').manual_seed(0)
__snake_case , __snake_case : Optional[int] = pipe_prior(
__UpperCamelCase , generator=__UpperCamelCase , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
__snake_case : Optional[Any] = pipeline(
__UpperCamelCase , image=__UpperCamelCase , image_embeds=__UpperCamelCase , negative_image_embeds=__UpperCamelCase , generator=__UpperCamelCase , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type='np' , )
__snake_case : Tuple = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__UpperCamelCase , __UpperCamelCase)
| 707 | """simple docstring"""
from typing import Any
class UpperCamelCase :
def __init__(self : List[str] , _A : Any) -> int:
__snake_case : Any = data
__snake_case : Dict = None
def __repr__(self : Tuple) -> str:
return f"Node({self.data})"
class UpperCamelCase :
def __init__(self : Union[str, Any]) -> Union[str, Any]:
__snake_case : Any = None
def __iter__(self : Tuple) -> Any:
__snake_case : List[str] = self.head
while node:
yield node.data
__snake_case : Any = node.next
def __len__(self : str) -> int:
return sum(1 for _ in self)
def __repr__(self : int) -> str:
return "->".join([str(_A) for item in self])
def __getitem__(self : List[Any] , _A : int) -> Any:
if not 0 <= index < len(self):
raise ValueError('list index out of range.')
for i, node in enumerate(self):
if i == index:
return node
return None
def __setitem__(self : int , _A : int , _A : Any) -> None:
if not 0 <= index < len(self):
raise ValueError('list index out of range.')
__snake_case : Optional[int] = self.head
for _ in range(_A):
__snake_case : Any = current.next
__snake_case : Dict = data
def _lowercase (self : List[Any] , _A : Any) -> None:
self.insert_nth(len(self) , _A)
def _lowercase (self : List[str] , _A : Any) -> None:
self.insert_nth(0 , _A)
def _lowercase (self : Optional[Any] , _A : int , _A : Any) -> None:
if not 0 <= index <= len(self):
raise IndexError('list index out of range')
__snake_case : str = Node(_A)
if self.head is None:
__snake_case : str = new_node
elif index == 0:
__snake_case : Union[str, Any] = self.head # link new_node to head
__snake_case : int = new_node
else:
__snake_case : Any = self.head
for _ in range(index - 1):
__snake_case : Any = temp.next
__snake_case : Dict = temp.next
__snake_case : str = new_node
def _lowercase (self : Optional[int]) -> None: # print every node data
print(self)
def _lowercase (self : Optional[Any]) -> Any:
return self.delete_nth(0)
def _lowercase (self : List[str]) -> Any: # delete from tail
return self.delete_nth(len(self) - 1)
def _lowercase (self : int , _A : int = 0) -> Any:
if not 0 <= index <= len(self) - 1: # test if index is valid
raise IndexError('List index out of range.')
__snake_case : int = self.head # default first node
if index == 0:
__snake_case : Any = self.head.next
else:
__snake_case : List[Any] = self.head
for _ in range(index - 1):
__snake_case : List[str] = temp.next
__snake_case : Union[str, Any] = temp.next
__snake_case : str = temp.next.next
return delete_node.data
def _lowercase (self : str) -> bool:
return self.head is None
def _lowercase (self : Tuple) -> None:
__snake_case : List[Any] = None
__snake_case : Optional[Any] = self.head
while current:
# Store the current node's next node.
__snake_case : List[str] = current.next
# Make the current node's next point backwards
__snake_case : Optional[Any] = prev
# Make the previous node be the current node
__snake_case : Optional[Any] = current
# Make the current node the next node (to progress iteration)
__snake_case : Any = next_node
# Return prev in order to put the head at the end
__snake_case : Optional[Any] = prev
def __UpperCAmelCase ( ) -> None:
'''simple docstring'''
__snake_case : Union[str, Any] = LinkedList()
assert linked_list.is_empty() is True
assert str(UpperCAmelCase_ ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(UpperCAmelCase_ ) == i
linked_list.insert_nth(UpperCAmelCase_ , i + 1 )
assert str(UpperCAmelCase_ ) == "->".join(str(UpperCAmelCase_ ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(UpperCAmelCase_ ) == "->".join(str(UpperCAmelCase_ ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(UpperCAmelCase_ ) == 9
assert str(UpperCAmelCase_ ) == "->".join(str(UpperCAmelCase_ ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
__snake_case : Tuple = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(UpperCAmelCase_ ) == "->".join(str(UpperCAmelCase_ ) for i in range(-8 , 1 ) )
def __UpperCAmelCase ( ) -> None:
'''simple docstring'''
__snake_case : str = [
-9,
1_00,
Node(77_34_51_12 ),
'dlrow olleH',
7,
55_55,
0,
-192.55_555,
'Hello, world!',
77.9,
Node(10 ),
None,
None,
12.20,
]
__snake_case : Optional[Any] = LinkedList()
for i in test_input:
linked_list.insert_tail(UpperCAmelCase_ )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(UpperCAmelCase_ ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
__snake_case : int = linked_list.delete_head()
assert result == -9
assert (
str(UpperCAmelCase_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
__snake_case : Any = linked_list.delete_tail()
assert result == 12.2
assert (
str(UpperCAmelCase_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
__snake_case : Tuple = linked_list.delete_nth(10 )
assert result is None
assert (
str(UpperCAmelCase_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('Hello again, world!' ) )
assert (
str(UpperCAmelCase_ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(UpperCAmelCase_ )
assert (
str(UpperCAmelCase_ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(UpperCAmelCase_ )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def __UpperCAmelCase ( ) -> List[Any]:
'''simple docstring'''
from doctest import testmod
testmod()
__snake_case : int = LinkedList()
linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
print('\nDelete head' )
linked_list.delete_head()
print('Delete tail' )
linked_list.delete_tail()
print('\nPrint list:' )
linked_list.print_list()
print('\nReverse linked list' )
linked_list.reverse()
print('\nPrint list:' )
linked_list.print_list()
print('\nString representation of linked list:' )
print(UpperCAmelCase_ )
print('\nReading/changing Node data using indexing:' )
print(F"Element at Position 1: {linked_list[1]}" )
__snake_case : Optional[Any] = input('Enter New Value: ' ).strip()
print('New list:' )
print(UpperCAmelCase_ )
print(F"length of linked_list is : {len(UpperCAmelCase_ )}" )
if __name__ == "__main__":
main()
| 192 | 0 |
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def snake_case ( a_ : str , a_ : str ) -> Union[str, Any]:
"""simple docstring"""
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
UpperCamelCase_ : Tuple = flax_key_tuple[:-1] + ("""weight""",)
UpperCamelCase_ : Tuple = torch.permute(__UpperCamelCase , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(__UpperCamelCase ):
# linear layer
UpperCamelCase_ : Union[str, Any] = flax_key_tuple[:-1] + ("""weight""",)
UpperCamelCase_ : Any = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
UpperCamelCase_ : List[Any] = flax_key_tuple[:-1] + ("""weight""",)
return flax_key_tuple, flax_tensor
def snake_case ( a_ : Dict , a_ : List[str] , a_ : Optional[int] ) -> List[Any]:
"""simple docstring"""
if "metadata" in layer:
UpperCamelCase_ : List[str] = layer.split("""metadata""" )
UpperCamelCase_ : Union[str, Any] = """""".join(split_layer[0] )[:-1]
UpperCamelCase_ : int = [tuple(("""metadata""" + split_layer[1]).split("""/""" ) )]
elif "kvstore" in layer:
UpperCamelCase_ : str = layer.split("""kvstore""" )
UpperCamelCase_ : List[str] = """""".join(split_layer[0] )[:-1]
UpperCamelCase_ : Union[str, Any] = [tuple(("""kvstore""" + split_layer[1]).split("""/""" ) )]
else:
UpperCamelCase_ : str = layer.split("""/""" )
UpperCamelCase_ : Optional[Any] = """/""".join(split_layer[:-1] )
UpperCamelCase_ : Dict = (split_layer[-1],)
if "kvstore/path" in layer:
UpperCamelCase_ : Union[str, Any] = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
elif "kvstore/driver" in layer:
UpperCamelCase_ : List[Any] = """file"""
else:
UpperCamelCase_ : Tuple = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def snake_case ( a_ : Tuple , a_ : int ) -> Dict:
"""simple docstring"""
UpperCamelCase_ : Dict = rename_keys(__UpperCamelCase )
UpperCamelCase_ : Tuple = {}
for k, v in current_block.items():
UpperCamelCase_ : Optional[Any] = v
UpperCamelCase_ : Any = new_current_block
torch.save(__UpperCamelCase , __UpperCamelCase )
def snake_case ( a_ : Optional[int] , a_ : Union[str, Any] , a_ : List[str] , a_ : str , a_ : Union[str, Any] = WEIGHTS_NAME ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ : str = convert_file_size_to_int(__UpperCamelCase )
UpperCamelCase_ : str = []
UpperCamelCase_ : Any = {}
UpperCamelCase_ : Dict = 0
UpperCamelCase_ : Any = 0
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
with gfile.GFile(switch_checkpoint_path + """/checkpoint""" , """rb""" ) as fp:
UpperCamelCase_ : List[Any] = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""]
UpperCamelCase_ : Union[str, Any] = flatten_dict(__UpperCamelCase , sep="""/""" )
UpperCamelCase_ : Dict = {}
for layer in checkpoint_info.keys():
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ : Tuple = get_key_and_tensorstore_dict(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if curr_real_layer_name in all_layers:
UpperCamelCase_ : int = content
else:
UpperCamelCase_ : Dict = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
UpperCamelCase_ : str = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
UpperCamelCase_ : List[str] = torch.tensor(__UpperCamelCase )
UpperCamelCase_ : Union[str, Any] = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
UpperCamelCase_ , UpperCamelCase_ : Union[str, Any] = rename_base_flax_keys(tuple(key.split("""/""" ) ) , __UpperCamelCase )
UpperCamelCase_ : Optional[int] = """/""".join(__UpperCamelCase )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
UpperCamelCase_ : Dict = os.path.join(
__UpperCamelCase , weights_name.replace(""".bin""" , f"-{len(__UpperCamelCase )+1:05d}-of-???.bin" ) )
rename_and_save_block(__UpperCamelCase , __UpperCamelCase )
sharded_state_dicts.append(current_block.keys() )
del current_block
UpperCamelCase_ : Dict = {}
UpperCamelCase_ : Any = 0
UpperCamelCase_ : Optional[Any] = raw_weights.to(getattr(__UpperCamelCase , __UpperCamelCase ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
UpperCamelCase_ : Tuple = os.path.join(__UpperCamelCase , weights_name.replace(""".bin""" , f"-{len(__UpperCamelCase )+1:05d}-of-???.bin" ) )
rename_and_save_block(__UpperCamelCase , __UpperCamelCase )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(__UpperCamelCase ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
UpperCamelCase_ : Any = {}
UpperCamelCase_ : Union[str, Any] = {}
for idx, shard in enumerate(__UpperCamelCase ):
UpperCamelCase_ : List[str] = weights_name.replace(
""".bin""" , f"-{idx+1:05d}-of-{len(__UpperCamelCase ):05d}.bin" ) # len(sharded_state_dicts):05d}
UpperCamelCase_ : Union[str, Any] = os.path.join(__UpperCamelCase , weights_name.replace(""".bin""" , f"-{idx+1:05d}-of-???.bin" ) )
os.rename(__UpperCamelCase , os.path.join(__UpperCamelCase , __UpperCamelCase ) )
UpperCamelCase_ : int = shard
for key in shard:
UpperCamelCase_ : Optional[Any] = shard_file
# Add the metadata
UpperCamelCase_ : List[Any] = {"""total_size""": total_size}
UpperCamelCase_ : Optional[int] = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(__UpperCamelCase , __UpperCamelCase ) , """w""" , encoding="""utf-8""" ) as f:
UpperCamelCase_ : Any = json.dumps(__UpperCamelCase , indent=2 , sort_keys=__UpperCamelCase ) + """\n"""
f.write(__UpperCamelCase )
return metadata, index
if __name__ == "__main__":
UpperCamelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
type=str,
required=False,
help="Path to the output pytorch model.",
)
UpperCamelCase =parser.parse_args()
shard_on_the_fly(
args.switch_tax_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def snake_case ( ) -> Optional[Any]:
"""simple docstring"""
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
UpperCamelCase_ : List[Any] = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""" )
config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""" )
UpperCamelCase_ : Optional[Any] = SwitchTransformersForConditionalGeneration.from_pretrained(
"""/home/arthur_huggingface_co/transformers/switch_converted""" , device_map="""auto""" )
UpperCamelCase_ : Optional[Any] = TaTokenizer.from_pretrained("""t5-small""" )
UpperCamelCase_ : int = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."""
UpperCamelCase_ : str = tokenizer(__UpperCamelCase , return_tensors="""pt""" ).input_ids
UpperCamelCase_ : Dict = model.generate(__UpperCamelCase , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
| 208 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __lowerCAmelCase :
"""simple docstring"""
@staticmethod
def _a ( *_snake_case : Any , **_snake_case : Optional[int] ):
"""simple docstring"""
pass
@is_pipeline_test
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
A__ : Union[str, Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def _a ( self : List[Any] , _snake_case : Union[str, Any] , _snake_case : Tuple , _snake_case : Union[str, Any] ):
"""simple docstring"""
A__ = pipeline('visual-question-answering' , model='hf-internal-testing/tiny-vilt-random-vqa' )
A__ = [
{
'image': Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'question': 'How many cats are there?',
},
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'question': 'How many cats are there?',
},
]
return vqa_pipeline, examples
def _a ( self : Optional[Any] , _snake_case : Union[str, Any] , _snake_case : List[str] ):
"""simple docstring"""
A__ = vqa_pipeline(_snake_case , top_k=1 )
self.assertEqual(
_snake_case , [
[{'score': ANY(_snake_case ), 'answer': ANY(_snake_case )}],
[{'score': ANY(_snake_case ), 'answer': ANY(_snake_case )}],
] , )
@require_torch
def _a ( self : Any ):
"""simple docstring"""
A__ = pipeline('visual-question-answering' , model='hf-internal-testing/tiny-vilt-random-vqa' )
A__ = './tests/fixtures/tests_samples/COCO/000000039769.png'
A__ = 'How many cats are there?'
A__ = vqa_pipeline(image=_snake_case , question='How many cats are there?' , top_k=2 )
self.assertEqual(
_snake_case , [{'score': ANY(_snake_case ), 'answer': ANY(_snake_case )}, {'score': ANY(_snake_case ), 'answer': ANY(_snake_case )}] )
A__ = vqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
_snake_case , [{'score': ANY(_snake_case ), 'answer': ANY(_snake_case )}, {'score': ANY(_snake_case ), 'answer': ANY(_snake_case )}] )
@slow
@require_torch
def _a ( self : Any ):
"""simple docstring"""
A__ = pipeline('visual-question-answering' , model='dandelin/vilt-b32-finetuned-vqa' )
A__ = './tests/fixtures/tests_samples/COCO/000000039769.png'
A__ = 'How many cats are there?'
A__ = vqa_pipeline(image=_snake_case , question=_snake_case , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}] )
A__ = vqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}] )
A__ = vqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [[{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}]] * 2 , )
@require_tf
@unittest.skip('Visual question answering not implemented in TF' )
def _a ( self : Dict ):
"""simple docstring"""
pass
| 9 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict =["image_processor", "tokenizer"]
SCREAMING_SNAKE_CASE_ : Tuple ="ChineseCLIPImageProcessor"
SCREAMING_SNAKE_CASE_ : Union[str, Any] =("BertTokenizer", "BertTokenizerFast")
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : str=None , **SCREAMING_SNAKE_CASE__ : Tuple ):
"""simple docstring"""
UpperCamelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , SCREAMING_SNAKE_CASE__ , )
UpperCamelCase = kwargs.pop('feature_extractor' )
UpperCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCamelCase = self.image_processor
def __call__( self : int , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , **SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
UpperCamelCase = self.tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
if images is not None:
UpperCamelCase = self.image_processor(SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
if text is not None and images is not None:
UpperCamelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**SCREAMING_SNAKE_CASE__ ) , tensor_type=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Dict , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ):
"""simple docstring"""
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[str] , *SCREAMING_SNAKE_CASE__ : Optional[int] , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
"""simple docstring"""
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@property
def __lowerCAmelCase ( self : str ):
"""simple docstring"""
UpperCamelCase = self.tokenizer.model_input_names
UpperCamelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __lowerCAmelCase ( self : Tuple ):
"""simple docstring"""
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , SCREAMING_SNAKE_CASE__ , )
return self.image_processor_class
| 170 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class _lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] ="roc_bert"
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Dict=3_05_22 , SCREAMING_SNAKE_CASE__ : Optional[Any]=7_68 , SCREAMING_SNAKE_CASE__ : Dict=12 , SCREAMING_SNAKE_CASE__ : Tuple=12 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=30_72 , SCREAMING_SNAKE_CASE__ : int="gelu" , SCREAMING_SNAKE_CASE__ : List[Any]=0.1 , SCREAMING_SNAKE_CASE__ : Any=0.1 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=5_12 , SCREAMING_SNAKE_CASE__ : Dict=2 , SCREAMING_SNAKE_CASE__ : List[str]=0.02 , SCREAMING_SNAKE_CASE__ : Dict=1e-12 , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Optional[int]=0 , SCREAMING_SNAKE_CASE__ : int="absolute" , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : str=7_68 , SCREAMING_SNAKE_CASE__ : int=9_10 , SCREAMING_SNAKE_CASE__ : Optional[int]=5_12 , SCREAMING_SNAKE_CASE__ : int=2_48_58 , SCREAMING_SNAKE_CASE__ : Tuple=True , **SCREAMING_SNAKE_CASE__ : List[Any] , ):
"""simple docstring"""
UpperCamelCase = vocab_size
UpperCamelCase = max_position_embeddings
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = initializer_range
UpperCamelCase = type_vocab_size
UpperCamelCase = layer_norm_eps
UpperCamelCase = use_cache
UpperCamelCase = enable_pronunciation
UpperCamelCase = enable_shape
UpperCamelCase = pronunciation_embed_dim
UpperCamelCase = pronunciation_vocab_size
UpperCamelCase = shape_embed_dim
UpperCamelCase = shape_vocab_size
UpperCamelCase = concat_input
UpperCamelCase = position_embedding_type
UpperCamelCase = classifier_dropout
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
| 170 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase = {
'configuration_clipseg': [
'CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPSegConfig',
'CLIPSegTextConfig',
'CLIPSegVisionConfig',
],
'processing_clipseg': ['CLIPSegProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPSegModel',
'CLIPSegPreTrainedModel',
'CLIPSegTextModel',
'CLIPSegVisionModel',
'CLIPSegForImageSegmentation',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 269 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
UpperCamelCase = logging.get_logger(__name__)
class _A ( UpperCAmelCase_ ):
def __init__( self : str , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Any ):
"""simple docstring"""
warnings.warn(
"""The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use BeitImageProcessor instead.""" , lowerCamelCase__ , )
super().__init__(*lowerCamelCase__ , **lowerCamelCase__ )
| 269 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
__UpperCamelCase : Optional[int] = {
"""configuration_speech_to_text""": ["""SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Speech2TextConfig"""],
"""processing_speech_to_text""": ["""Speech2TextProcessor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Union[str, Any] = ["""Speech2TextTokenizer"""]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[Any] = ["""Speech2TextFeatureExtractor"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[Any] = [
"""TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSpeech2TextForConditionalGeneration""",
"""TFSpeech2TextModel""",
"""TFSpeech2TextPreTrainedModel""",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Tuple = [
"""SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Speech2TextForConditionalGeneration""",
"""Speech2TextModel""",
"""Speech2TextPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
__UpperCamelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 705 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase : Tuple = {
"""configuration_blip_2""": [
"""BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Blip2Config""",
"""Blip2QFormerConfig""",
"""Blip2VisionConfig""",
],
"""processing_blip_2""": ["""Blip2Processor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = [
"""BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Blip2Model""",
"""Blip2QFormerModel""",
"""Blip2PreTrainedModel""",
"""Blip2ForConditionalGeneration""",
"""Blip2VisionModel""",
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
__UpperCamelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 270 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_snake_case : str = {
'configuration_layoutlmv3': [
'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LayoutLMv3Config',
'LayoutLMv3OnnxConfig',
],
'processing_layoutlmv3': ['LayoutLMv3Processor'],
'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : List[str] = ['LayoutLMv3TokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Optional[int] = [
'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv3ForQuestionAnswering',
'LayoutLMv3ForSequenceClassification',
'LayoutLMv3ForTokenClassification',
'LayoutLMv3Model',
'LayoutLMv3PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Tuple = [
'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLayoutLMv3ForQuestionAnswering',
'TFLayoutLMv3ForSequenceClassification',
'TFLayoutLMv3ForTokenClassification',
'TFLayoutLMv3Model',
'TFLayoutLMv3PreTrainedModel',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : List[Any] = ['LayoutLMv3FeatureExtractor']
_snake_case : Tuple = ['LayoutLMv3ImageProcessor']
if TYPE_CHECKING:
from .configuration_layoutlmva import (
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
LayoutLMvaConfig,
LayoutLMvaOnnxConfig,
)
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_layoutlmva import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
TFLayoutLMvaPreTrainedModel,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
else:
import sys
_snake_case : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 22 |
from pathlib import Path
import fire
def lowercase__ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = Path(_UpperCamelCase)
UpperCamelCase = Path(_UpperCamelCase)
dest_dir.mkdir(exist_ok=_UpperCamelCase)
for path in src_dir.iterdir():
UpperCamelCase = [x.rstrip() for x in list(path.open().readlines())][:n]
UpperCamelCase = dest_dir.joinpath(path.name)
print(_UpperCamelCase)
dest_path.open('w').write('\n'.join(_UpperCamelCase))
if __name__ == "__main__":
fire.Fire(minify)
| 280 | 0 |
from __future__ import annotations
def lowerCamelCase__ ( a : list[int] , a : int ) -> int:
"""simple docstring"""
if len(a ) < k or k < 0:
raise ValueError("Invalid Input" )
a__ :Optional[int] = sum(array[:k] )
for i in range(len(a ) - k ):
a__ :int = current_sum - array[i] + array[i + k]
a__ :List[str] = max(a , a )
return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
snake_case__ = [randint(-1000, 1000) for i in range(100)]
snake_case__ = randint(0, 110)
print(f'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
| 373 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
snake_case__ = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class lowerCAmelCase_ ( _a):
lowerCamelCase_ = 'albert'
def __init__( self : Any , __A : Optional[int]=30000 , __A : List[str]=128 , __A : int=4096 , __A : Any=12 , __A : Union[str, Any]=1 , __A : Optional[int]=64 , __A : Dict=16384 , __A : List[str]=1 , __A : Any="gelu_new" , __A : List[Any]=0 , __A : str=0 , __A : List[str]=512 , __A : Optional[Any]=2 , __A : Tuple=0.02 , __A : int=1E-12 , __A : str=0.1 , __A : Optional[Any]="absolute" , __A : Tuple=0 , __A : str=2 , __A : Union[str, Any]=3 , **__A : Union[str, Any] , ) ->str:
"""simple docstring"""
super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A )
a__ :Tuple = vocab_size
a__ :Dict = embedding_size
a__ :Union[str, Any] = hidden_size
a__ :int = num_hidden_layers
a__ :List[Any] = num_hidden_groups
a__ :str = num_attention_heads
a__ :Optional[Any] = inner_group_num
a__ :Any = hidden_act
a__ :Optional[int] = intermediate_size
a__ :Optional[Any] = hidden_dropout_prob
a__ :Optional[int] = attention_probs_dropout_prob
a__ :int = max_position_embeddings
a__ :List[Any] = type_vocab_size
a__ :Dict = initializer_range
a__ :Optional[int] = layer_norm_eps
a__ :Optional[Any] = classifier_dropout_prob
a__ :Optional[Any] = position_embedding_type
class lowerCAmelCase_ ( _a):
@property
def _snake_case ( self : Optional[int] ) ->Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
a__ :List[Any] = {0: "batch", 1: "choice", 2: "sequence"}
else:
a__ :Dict = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
| 373 | 1 |
'''simple docstring'''
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def snake_case_ ( lowercase__ ):
UpperCAmelCase__ : List[Any] = []
UpperCAmelCase__ : Optional[Any] = []
UpperCAmelCase__ : Tuple = []
for rt in rc.restypes:
UpperCAmelCase__ : Dict = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
UpperCAmelCase__ : str = {name: i for i, name in enumerate(__UpperCAmelCase )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 1_4 )
restype_atomaa_to_atomaa_list.append([0] * 3_7 )
restype_atomaa_mask_list.append([0.0] * 1_4 )
UpperCAmelCase__ : Dict = torch.tensor(
__UpperCAmelCase , dtype=torch.intaa , device=protein["aatype"].device , )
UpperCAmelCase__ : List[str] = torch.tensor(
__UpperCAmelCase , dtype=torch.intaa , device=protein["aatype"].device , )
UpperCAmelCase__ : Optional[Any] = torch.tensor(
__UpperCAmelCase , dtype=torch.floataa , device=protein["aatype"].device , )
UpperCAmelCase__ : Optional[int] = protein['''aatype'''].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
UpperCAmelCase__ : Dict = restype_atomaa_to_atomaa[protein_aatype]
UpperCAmelCase__ : List[str] = restype_atomaa_mask[protein_aatype]
UpperCAmelCase__ : Any = residx_atomaa_mask
UpperCAmelCase__ : str = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
UpperCAmelCase__ : Union[str, Any] = restype_atomaa_to_atomaa[protein_aatype]
UpperCAmelCase__ : List[Any] = residx_atomaa_to_atomaa.long()
# create the corresponding mask
UpperCAmelCase__ : List[Any] = torch.zeros([2_1, 3_7] , dtype=torch.floataa , device=protein["aatype"].device )
for restype, restype_letter in enumerate(rc.restypes ):
UpperCAmelCase__ : Tuple = rc.restype_atoa[restype_letter]
UpperCAmelCase__ : Tuple = rc.residue_atoms[restype_name]
for atom_name in atom_names:
UpperCAmelCase__ : Any = rc.atom_order[atom_name]
UpperCAmelCase__ : Union[str, Any] = 1
UpperCAmelCase__ : Optional[Any] = restype_atomaa_mask[protein_aatype]
UpperCAmelCase__ : Optional[Any] = residx_atomaa_mask
return protein
def snake_case_ ( lowercase__ ):
UpperCAmelCase__ : Optional[int] = tree_map(lambda lowercase__ : torch.tensor(__UpperCAmelCase , device=batch["aatype"].device ) , __UpperCAmelCase , np.ndarray )
UpperCAmelCase__ : Optional[Any] = tensor_tree_map(lambda lowercase__ : np.array(__UpperCAmelCase ) , make_atomaa_masks(__UpperCAmelCase ) )
return out
| 199 |
'''simple docstring'''
import datasets
__lowerCamelCase : int = """\
@InProceedings{conneau2018xnli,
author = \"Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin\",
title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",
booktitle = \"Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing\",
year = \"2018\",
publisher = \"Association for Computational Linguistics\",
location = \"Brussels, Belgium\",
}
"""
__lowerCamelCase : List[Any] = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into a 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""
__lowerCamelCase : List[Any] = """
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
'accuracy': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric(\"xnli\")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
"""
def __snake_case (__UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
def __UpperCamelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
def __UpperCamelCase ( self : Optional[int] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return {"accuracy": simple_accuracy(UpperCamelCase_ , UpperCamelCase_ )}
| 501 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __init__( self, _a, _a=7, _a=3, _a=18, _a=30, _a=4_00, _a=True, _a=None, _a=True, _a=None, ) -> int:
__SCREAMING_SNAKE_CASE = size if size is not None else {"shortest_edge": 20}
__SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else {"height": 18, "width": 18}
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = image_size
__SCREAMING_SNAKE_CASE = min_resolution
__SCREAMING_SNAKE_CASE = max_resolution
__SCREAMING_SNAKE_CASE = do_resize
__SCREAMING_SNAKE_CASE = size
__SCREAMING_SNAKE_CASE = do_center_crop
__SCREAMING_SNAKE_CASE = crop_size
def __lowerCAmelCase ( self ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ =MobileNetVaImageProcessor if is_vision_available() else None
def __lowerCAmelCase ( self ) -> List[str]:
__SCREAMING_SNAKE_CASE = MobileNetVaImageProcessingTester(self )
@property
def __lowerCAmelCase ( self ) -> List[str]:
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a, "do_resize" ) )
self.assertTrue(hasattr(_a, "size" ) )
self.assertTrue(hasattr(_a, "do_center_crop" ) )
self.assertTrue(hasattr(_a, "crop_size" ) )
def __lowerCAmelCase ( self ) -> List[Any]:
__SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {"shortest_edge": 20} )
self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18} )
__SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84 )
self.assertEqual(image_processor.size, {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84} )
def __lowerCAmelCase ( self ) -> Tuple:
pass
def __lowerCAmelCase ( self ) -> int:
# Initialize image_processing
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester, equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a, Image.Image )
# Test not batched input
__SCREAMING_SNAKE_CASE = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
# Test batched
__SCREAMING_SNAKE_CASE = image_processing(_a, return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
def __lowerCAmelCase ( self ) -> Optional[Any]:
# Initialize image_processing
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester, equal_resolution=_a, numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a, np.ndarray )
# Test not batched input
__SCREAMING_SNAKE_CASE = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
# Test batched
__SCREAMING_SNAKE_CASE = image_processing(_a, return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
def __lowerCAmelCase ( self ) -> Dict:
# Initialize image_processing
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester, equal_resolution=_a, torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a, torch.Tensor )
# Test not batched input
__SCREAMING_SNAKE_CASE = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
# Test batched
__SCREAMING_SNAKE_CASE = image_processing(_a, return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
| 703 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _A ( __snake_case :int ) -> Optional[int]:
"""simple docstring"""
if (
(cp >= 0x4E_00 and cp <= 0x9F_FF)
or (cp >= 0x34_00 and cp <= 0x4D_BF) #
or (cp >= 0x2_00_00 and cp <= 0x2_A6_DF) #
or (cp >= 0x2_A7_00 and cp <= 0x2_B7_3F) #
or (cp >= 0x2_B7_40 and cp <= 0x2_B8_1F) #
or (cp >= 0x2_B8_20 and cp <= 0x2_CE_AF) #
or (cp >= 0xF9_00 and cp <= 0xFA_FF)
or (cp >= 0x2_F8_00 and cp <= 0x2_FA_1F) #
): #
return True
return False
def _A ( __snake_case :str ) -> int:
"""simple docstring"""
for char in word:
__SCREAMING_SNAKE_CASE = ord(__snake_case )
if not _is_chinese_char(__snake_case ):
return 0
return 1
def _A ( __snake_case :List[str] ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = set()
for token in tokens:
__SCREAMING_SNAKE_CASE = len(__snake_case ) > 1 and is_chinese(__snake_case )
if chinese_word:
word_set.add(__snake_case )
__SCREAMING_SNAKE_CASE = list(__snake_case )
return word_list
def _A ( __snake_case :List[str] , __snake_case :set() ) -> Any:
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
__SCREAMING_SNAKE_CASE = max([len(__snake_case ) for w in chinese_word_set] )
__SCREAMING_SNAKE_CASE = bert_tokens
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 0, len(__snake_case )
while start < end:
__SCREAMING_SNAKE_CASE = True
if is_chinese(bert_word[start] ):
__SCREAMING_SNAKE_CASE = min(end - start , __snake_case )
for i in range(__snake_case , 1 , -1 ):
__SCREAMING_SNAKE_CASE = "".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
__SCREAMING_SNAKE_CASE = "##" + bert_word[j]
__SCREAMING_SNAKE_CASE = start + i
__SCREAMING_SNAKE_CASE = False
break
if single_word:
start += 1
return bert_word
def _A ( __snake_case :List[str] , __snake_case :LTP , __snake_case :BertTokenizer ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = []
for i in range(0 , len(__snake_case ) , 100 ):
__SCREAMING_SNAKE_CASE = ltp_tokenizer.seg(lines[i : i + 100] )[0]
__SCREAMING_SNAKE_CASE = [get_chinese_word(__snake_case ) for r in res]
ltp_res.extend(__snake_case )
assert len(__snake_case ) == len(__snake_case )
__SCREAMING_SNAKE_CASE = []
for i in range(0 , len(__snake_case ) , 100 ):
__SCREAMING_SNAKE_CASE = bert_tokenizer(lines[i : i + 100] , add_special_tokens=__snake_case , truncation=__snake_case , max_length=512 )
bert_res.extend(res["input_ids"] )
assert len(__snake_case ) == len(__snake_case )
__SCREAMING_SNAKE_CASE = []
for input_ids, chinese_word in zip(__snake_case , __snake_case ):
__SCREAMING_SNAKE_CASE = []
for id in input_ids:
__SCREAMING_SNAKE_CASE = bert_tokenizer._convert_id_to_token(__snake_case )
input_tokens.append(__snake_case )
__SCREAMING_SNAKE_CASE = add_sub_symbol(__snake_case , __snake_case )
__SCREAMING_SNAKE_CASE = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(__snake_case ):
if token[:2] == "##":
__SCREAMING_SNAKE_CASE = token[2:]
# save chinese tokens' pos
if len(__snake_case ) == 1 and _is_chinese_char(ord(__snake_case ) ):
ref_id.append(__snake_case )
ref_ids.append(__snake_case )
assert len(__snake_case ) == len(__snake_case )
return ref_ids
def _A ( __snake_case :Tuple ) -> Any:
"""simple docstring"""
with open(args.file_name , "r" , encoding="utf-8" ) as f:
__SCREAMING_SNAKE_CASE = f.readlines()
__SCREAMING_SNAKE_CASE = [line.strip() for line in data if len(__snake_case ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
__SCREAMING_SNAKE_CASE = LTP(args.ltp ) # faster in GPU device
__SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained(args.bert )
__SCREAMING_SNAKE_CASE = prepare_ref(__snake_case , __snake_case , __snake_case )
with open(args.save_path , "w" , encoding="utf-8" ) as f:
__SCREAMING_SNAKE_CASE = [json.dumps(__snake_case ) + "\n" for ref in ref_ids]
f.writelines(__snake_case )
if __name__ == "__main__":
_snake_case : List[Any] = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp', type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path'
)
parser.add_argument('--bert', type=str, default='./resources/robert', help='resources for Bert tokenizer')
parser.add_argument('--save_path', type=str, default='./resources/ref.txt', help='path to save res')
_snake_case : Union[str, Any] = parser.parse_args()
main(args)
| 214 | 0 |
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
lowerCamelCase__ : Optional[Any] = {
"""sample_size""": 3_2,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": 1_0_0_0,
"""block_out_channels""": [3_2, 6_4],
"""attention_head_dim""": 8,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
lowerCamelCase__ : Union[str, Any] = {
"""sample_size""": 6_4,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 3,
"""num_class_embeds""": 1_0_0_0,
"""block_out_channels""": [1_9_2, 1_9_2 * 2, 1_9_2 * 3, 1_9_2 * 4],
"""attention_head_dim""": 6_4,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
lowerCamelCase__ : List[str] = {
"""sample_size""": 2_5_6,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": None,
"""block_out_channels""": [2_5_6, 2_5_6, 2_5_6 * 2, 2_5_6 * 2, 2_5_6 * 4, 2_5_6 * 4],
"""attention_head_dim""": 6_4,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """default""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
lowerCamelCase__ : Any = {
"""num_train_timesteps""": 4_0,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
lowerCamelCase__ : int = {
"""num_train_timesteps""": 2_0_1,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
lowerCamelCase__ : List[Any] = {
"""num_train_timesteps""": 1_5_1,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
def UpperCamelCase ( lowercase_ ) -> Tuple:
'''simple docstring'''
if isinstance(lowercase_ , lowercase_ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("""boolean value expected""" )
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_=False ) -> Any:
'''simple docstring'''
lowercase__ : List[str] = checkpoint[F'{old_prefix}.in_layers.0.weight']
lowercase__ : Tuple = checkpoint[F'{old_prefix}.in_layers.0.bias']
lowercase__ : Any = checkpoint[F'{old_prefix}.in_layers.2.weight']
lowercase__ : Union[str, Any] = checkpoint[F'{old_prefix}.in_layers.2.bias']
lowercase__ : Optional[int] = checkpoint[F'{old_prefix}.emb_layers.1.weight']
lowercase__ : List[Any] = checkpoint[F'{old_prefix}.emb_layers.1.bias']
lowercase__ : List[Any] = checkpoint[F'{old_prefix}.out_layers.0.weight']
lowercase__ : Optional[int] = checkpoint[F'{old_prefix}.out_layers.0.bias']
lowercase__ : Union[str, Any] = checkpoint[F'{old_prefix}.out_layers.3.weight']
lowercase__ : Tuple = checkpoint[F'{old_prefix}.out_layers.3.bias']
if has_skip:
lowercase__ : Any = checkpoint[F'{old_prefix}.skip_connection.weight']
lowercase__ : Any = checkpoint[F'{old_prefix}.skip_connection.bias']
return new_checkpoint
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_=None ) -> List[str]:
'''simple docstring'''
lowercase__ , lowercase__ , lowercase__ : str = checkpoint[F'{old_prefix}.qkv.weight'].chunk(3 , dim=0 )
lowercase__ , lowercase__ , lowercase__ : Any = checkpoint[F'{old_prefix}.qkv.bias'].chunk(3 , dim=0 )
lowercase__ : Optional[Any] = checkpoint[F'{old_prefix}.norm.weight']
lowercase__ : Tuple = checkpoint[F'{old_prefix}.norm.bias']
lowercase__ : int = weight_q.squeeze(-1 ).squeeze(-1 )
lowercase__ : int = bias_q.squeeze(-1 ).squeeze(-1 )
lowercase__ : str = weight_k.squeeze(-1 ).squeeze(-1 )
lowercase__ : Dict = bias_k.squeeze(-1 ).squeeze(-1 )
lowercase__ : List[Any] = weight_v.squeeze(-1 ).squeeze(-1 )
lowercase__ : Dict = bias_v.squeeze(-1 ).squeeze(-1 )
lowercase__ : List[Any] = (
checkpoint[F'{old_prefix}.proj_out.weight'].squeeze(-1 ).squeeze(-1 )
)
lowercase__ : List[str] = checkpoint[F'{old_prefix}.proj_out.bias'].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def UpperCamelCase ( lowercase_ , lowercase_ ) -> Tuple:
'''simple docstring'''
lowercase__ : List[Any] = torch.load(lowercase_ , map_location="""cpu""" )
lowercase__ : Optional[Any] = {}
lowercase__ : Tuple = checkpoint["""time_embed.0.weight"""]
lowercase__ : Any = checkpoint["""time_embed.0.bias"""]
lowercase__ : Dict = checkpoint["""time_embed.2.weight"""]
lowercase__ : str = checkpoint["""time_embed.2.bias"""]
if unet_config["num_class_embeds"] is not None:
lowercase__ : int = checkpoint["""label_emb.weight"""]
lowercase__ : Optional[int] = checkpoint["""input_blocks.0.0.weight"""]
lowercase__ : Union[str, Any] = checkpoint["""input_blocks.0.0.bias"""]
lowercase__ : Union[str, Any] = unet_config["""down_block_types"""]
lowercase__ : str = unet_config["""layers_per_block"""]
lowercase__ : Optional[int] = unet_config["""attention_head_dim"""]
lowercase__ : Optional[Any] = unet_config["""block_out_channels"""]
lowercase__ : Tuple = 1
lowercase__ : Optional[int] = channels_list[0]
for i, layer_type in enumerate(lowercase_ ):
lowercase__ : Any = channels_list[i]
lowercase__ : List[Any] = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(lowercase_ ):
lowercase__ : Any = F'down_blocks.{i}.resnets.{j}'
lowercase__ : int = F'input_blocks.{current_layer}.0'
lowercase__ : Tuple = True if j == 0 and downsample_block_has_skip else False
lowercase__ : Tuple = convert_resnet(lowercase_ , lowercase_ , lowercase_ , lowercase_ , has_skip=lowercase_ )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(lowercase_ ):
lowercase__ : Optional[Any] = F'down_blocks.{i}.resnets.{j}'
lowercase__ : Union[str, Any] = F'input_blocks.{current_layer}.0'
lowercase__ : Any = True if j == 0 and downsample_block_has_skip else False
lowercase__ : Optional[int] = convert_resnet(lowercase_ , lowercase_ , lowercase_ , lowercase_ , has_skip=lowercase_ )
lowercase__ : Optional[int] = F'down_blocks.{i}.attentions.{j}'
lowercase__ : int = F'input_blocks.{current_layer}.1'
lowercase__ : str = convert_attention(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
current_layer += 1
if i != len(lowercase_ ) - 1:
lowercase__ : Dict = F'down_blocks.{i}.downsamplers.0'
lowercase__ : Union[str, Any] = F'input_blocks.{current_layer}.0'
lowercase__ : List[str] = convert_resnet(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
current_layer += 1
lowercase__ : Optional[Any] = current_channels
# hardcoded the mid-block for now
lowercase__ : Any = """mid_block.resnets.0"""
lowercase__ : List[Any] = """middle_block.0"""
lowercase__ : Tuple = convert_resnet(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
lowercase__ : str = """mid_block.attentions.0"""
lowercase__ : Union[str, Any] = """middle_block.1"""
lowercase__ : Tuple = convert_attention(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
lowercase__ : Dict = """mid_block.resnets.1"""
lowercase__ : List[Any] = """middle_block.2"""
lowercase__ : int = convert_resnet(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
lowercase__ : str = 0
lowercase__ : Union[str, Any] = unet_config["""up_block_types"""]
for i, layer_type in enumerate(lowercase_ ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
lowercase__ : List[str] = F'up_blocks.{i}.resnets.{j}'
lowercase__ : Dict = F'output_blocks.{current_layer}.0'
lowercase__ : str = convert_resnet(lowercase_ , lowercase_ , lowercase_ , lowercase_ , has_skip=lowercase_ )
current_layer += 1
if i != len(lowercase_ ) - 1:
lowercase__ : List[Any] = F'up_blocks.{i}.upsamplers.0'
lowercase__ : Any = F'output_blocks.{current_layer-1}.1'
lowercase__ : List[str] = convert_resnet(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
lowercase__ : Dict = F'up_blocks.{i}.resnets.{j}'
lowercase__ : Any = F'output_blocks.{current_layer}.0'
lowercase__ : List[str] = convert_resnet(lowercase_ , lowercase_ , lowercase_ , lowercase_ , has_skip=lowercase_ )
lowercase__ : Tuple = F'up_blocks.{i}.attentions.{j}'
lowercase__ : Optional[Any] = F'output_blocks.{current_layer}.1'
lowercase__ : Any = convert_attention(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
current_layer += 1
if i != len(lowercase_ ) - 1:
lowercase__ : Any = F'up_blocks.{i}.upsamplers.0'
lowercase__ : Optional[int] = F'output_blocks.{current_layer-1}.2'
lowercase__ : int = convert_resnet(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
lowercase__ : str = checkpoint["""out.0.weight"""]
lowercase__ : Any = checkpoint["""out.0.bias"""]
lowercase__ : Any = checkpoint["""out.2.weight"""]
lowercase__ : Any = checkpoint["""out.2.bias"""]
return new_checkpoint
if __name__ == "__main__":
lowerCamelCase__ : Dict = argparse.ArgumentParser()
parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""")
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model."""
)
parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""")
lowerCamelCase__ : Tuple = parser.parse_args()
lowerCamelCase__ : Dict = strabool(args.class_cond)
lowerCamelCase__ : List[Any] = os.path.basename(args.unet_path)
print(f'''Checkpoint: {ckpt_name}''')
# Get U-Net config
if "imagenet64" in ckpt_name:
lowerCamelCase__ : Optional[Any] = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
lowerCamelCase__ : Optional[Any] = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
lowerCamelCase__ : Dict = TEST_UNET_CONFIG
else:
raise ValueError(f'''Checkpoint type {ckpt_name} is not currently supported.''')
if not args.class_cond:
lowerCamelCase__ : Union[str, Any] = None
lowerCamelCase__ : Optional[Any] = con_pt_to_diffuser(args.unet_path, unet_config)
lowerCamelCase__ : List[Any] = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
lowerCamelCase__ : Union[str, Any] = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
lowerCamelCase__ : List[str] = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
lowerCamelCase__ : List[Any] = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f'''Checkpoint type {ckpt_name} is not currently supported.''')
lowerCamelCase__ : str = CMStochasticIterativeScheduler(**scheduler_config)
lowerCamelCase__ : Optional[int] = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
| 12 |
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class _snake_case ( UpperCAmelCase_ ):
def __init__( self):
'''simple docstring'''
lowercase__ : List[Any] = []
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_init_end""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_train_begin""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_train_end""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_epoch_begin""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_epoch_end""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_step_begin""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_step_end""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_evaluate""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_predict""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_save""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_log""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_prediction_step""")
@require_torch
class _snake_case ( unittest.TestCase ):
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Dict = tempfile.mkdtemp()
def lowercase__ ( self):
'''simple docstring'''
shutil.rmtree(self.output_dir)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Any = RegressionDataset(length=SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[int] = RegressionDataset(length=SCREAMING_SNAKE_CASE_)
lowercase__ : Dict = RegressionModelConfig(a=SCREAMING_SNAKE_CASE_ , b=SCREAMING_SNAKE_CASE_)
lowercase__ : Any = RegressionPreTrainedModel(SCREAMING_SNAKE_CASE_)
lowercase__ : Any = TrainingArguments(self.output_dir , disable_tqdm=SCREAMING_SNAKE_CASE_ , report_to=[] , **SCREAMING_SNAKE_CASE_)
return Trainer(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , train_dataset=SCREAMING_SNAKE_CASE_ , eval_dataset=SCREAMING_SNAKE_CASE_ , callbacks=SCREAMING_SNAKE_CASE_ , )
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.assertEqual(len(SCREAMING_SNAKE_CASE_) , len(SCREAMING_SNAKE_CASE_))
# Order doesn't matter
lowercase__ : str = sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_: cb.__name__ if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) else cb.__class__.__name__)
lowercase__ : Tuple = sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_: cb.__name__ if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) else cb.__class__.__name__)
for cba, cba in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) and isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) and not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
self.assertEqual(SCREAMING_SNAKE_CASE_ , cba.__class__)
elif not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) and isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
self.assertEqual(cba.__class__ , SCREAMING_SNAKE_CASE_)
else:
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : int = ["""on_init_end""", """on_train_begin"""]
lowercase__ : Union[str, Any] = 0
lowercase__ : Union[str, Any] = len(trainer.get_eval_dataloader())
lowercase__ : Dict = ["""on_prediction_step"""] * len(trainer.get_eval_dataloader()) + ["""on_log""", """on_evaluate"""]
for _ in range(trainer.state.num_train_epochs):
expected_events.append("""on_epoch_begin""")
for _ in range(SCREAMING_SNAKE_CASE_):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append("""on_log""")
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append("""on_save""")
expected_events.append("""on_epoch_end""")
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : int = self.get_trainer()
lowercase__ : Union[str, Any] = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_)
# Callbacks passed at init are added to the default callbacks
lowercase__ : Any = self.get_trainer(callbacks=[MyTestTrainerCallback])
expected_callbacks.append(SCREAMING_SNAKE_CASE_)
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_)
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
lowercase__ : Any = self.get_trainer(disable_tqdm=SCREAMING_SNAKE_CASE_)
lowercase__ : Tuple = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Any = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
lowercase__ : Tuple = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(SCREAMING_SNAKE_CASE_)
expected_callbacks.remove(SCREAMING_SNAKE_CASE_)
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[int] = self.get_trainer()
lowercase__ : List[Any] = trainer.pop_callback(SCREAMING_SNAKE_CASE_)
self.assertEqual(cb.__class__ , SCREAMING_SNAKE_CASE_)
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_)
trainer.add_callback(SCREAMING_SNAKE_CASE_)
expected_callbacks.insert(0 , SCREAMING_SNAKE_CASE_)
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_)
# We can also add, pop, or remove by instance
lowercase__ : Union[str, Any] = self.get_trainer()
lowercase__ : Optional[Any] = trainer.callback_handler.callbacks[0]
trainer.remove_callback(SCREAMING_SNAKE_CASE_)
expected_callbacks.remove(SCREAMING_SNAKE_CASE_)
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_)
lowercase__ : str = self.get_trainer()
lowercase__ : Optional[Any] = trainer.callback_handler.callbacks[0]
lowercase__ : Union[str, Any] = trainer.pop_callback(SCREAMING_SNAKE_CASE_)
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_)
trainer.add_callback(SCREAMING_SNAKE_CASE_)
expected_callbacks.insert(0 , SCREAMING_SNAKE_CASE_)
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action="""ignore""" , category=SCREAMING_SNAKE_CASE_)
lowercase__ : Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback])
trainer.train()
lowercase__ : Union[str, Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_))
# Independent log/save/eval
lowercase__ : List[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5)
trainer.train()
lowercase__ : List[str] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_))
lowercase__ : Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5)
trainer.train()
lowercase__ : Dict = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_))
lowercase__ : Any = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="""steps""")
trainer.train()
lowercase__ : int = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_))
lowercase__ : Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="""epoch""")
trainer.train()
lowercase__ : Optional[int] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_))
# A bit of everything
lowercase__ : Any = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="""steps""" , )
trainer.train()
lowercase__ : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_))
# warning should be emitted for duplicated callbacks
with patch("""transformers.trainer_callback.logger.warning""") as warn_mock:
lowercase__ : Dict = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(SCREAMING_SNAKE_CASE_) in warn_mock.call_args[0][0]
| 12 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase_ ( A , unittest.TestCase ):
__lowerCamelCase = UnCLIPImageVariationPipeline
__lowerCamelCase = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
__lowerCamelCase = IMAGE_VARIATION_BATCH_PARAMS
__lowerCamelCase = [
"generator",
"return_dict",
"decoder_num_inference_steps",
"super_res_num_inference_steps",
]
__lowerCamelCase = False
@property
def _snake_case ( self ) -> Dict:
return 32
@property
def _snake_case ( self ) -> Optional[int]:
return 32
@property
def _snake_case ( self ) -> Any:
return self.time_input_dim
@property
def _snake_case ( self ) -> Tuple:
return self.time_input_dim * 4
@property
def _snake_case ( self ) -> Any:
return 100
@property
def _snake_case ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ : Optional[Any] =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def _snake_case ( self ) -> Tuple:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Tuple =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(__A )
@property
def _snake_case ( self ) -> Tuple:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Optional[int] =CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(__A )
@property
def _snake_case ( self ) -> Optional[int]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : str ={
'''clip_embeddings_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''cross_attention_dim''': self.cross_attention_dim,
}
SCREAMING_SNAKE_CASE_ : Any =UnCLIPTextProjModel(**__A )
return model
@property
def _snake_case ( self ) -> int:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Dict ={
'''sample_size''': 32,
# RGB in channels
'''in_channels''': 3,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 6,
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': '''identity''',
}
SCREAMING_SNAKE_CASE_ : str =UNetaDConditionModel(**__A )
return model
@property
def _snake_case ( self ) -> List[Any]:
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def _snake_case ( self ) -> List[str]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Any =UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def _snake_case ( self ) -> List[str]:
# seeded differently to get different unet than `self.dummy_super_res_first`
torch.manual_seed(1 )
SCREAMING_SNAKE_CASE_ : int =UNetaDModel(**self.dummy_super_res_kwargs )
return model
def _snake_case ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ : Optional[Any] =self.dummy_decoder
SCREAMING_SNAKE_CASE_ : List[str] =self.dummy_text_proj
SCREAMING_SNAKE_CASE_ : Any =self.dummy_text_encoder
SCREAMING_SNAKE_CASE_ : List[str] =self.dummy_tokenizer
SCREAMING_SNAKE_CASE_ : List[Any] =self.dummy_super_res_first
SCREAMING_SNAKE_CASE_ : Optional[Any] =self.dummy_super_res_last
SCREAMING_SNAKE_CASE_ : List[str] =UnCLIPScheduler(
variance_type='''learned_range''' , prediction_type='''epsilon''' , num_train_timesteps=1_000 , )
SCREAMING_SNAKE_CASE_ : Optional[int] =UnCLIPScheduler(
variance_type='''fixed_small_log''' , prediction_type='''epsilon''' , num_train_timesteps=1_000 , )
SCREAMING_SNAKE_CASE_ : str =CLIPImageProcessor(crop_size=32 , size=32 )
SCREAMING_SNAKE_CASE_ : Tuple =self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def _snake_case ( self , __A , __A=0 , __A=True ) -> List[Any]:
SCREAMING_SNAKE_CASE_ : Optional[Any] =floats_tensor((1, 3, 32, 32) , rng=random.Random(__A ) ).to(__A )
if str(__A ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE_ : List[Any] =torch.manual_seed(__A )
else:
SCREAMING_SNAKE_CASE_ : str =torch.Generator(device=__A ).manual_seed(__A )
if pil_image:
SCREAMING_SNAKE_CASE_ : Optional[Any] =input_image * 0.5 + 0.5
SCREAMING_SNAKE_CASE_ : str =input_image.clamp(0 , 1 )
SCREAMING_SNAKE_CASE_ : List[str] =input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
SCREAMING_SNAKE_CASE_ : int =DiffusionPipeline.numpy_to_pil(__A )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def _snake_case ( self ) -> Any:
SCREAMING_SNAKE_CASE_ : Tuple ='''cpu'''
SCREAMING_SNAKE_CASE_ : Tuple =self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : Dict =self.pipeline_class(**__A )
SCREAMING_SNAKE_CASE_ : List[Any] =pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
SCREAMING_SNAKE_CASE_ : str =self.get_dummy_inputs(__A , pil_image=__A )
SCREAMING_SNAKE_CASE_ : Optional[int] =pipe(**__A )
SCREAMING_SNAKE_CASE_ : Optional[Any] =output.images
SCREAMING_SNAKE_CASE_ : Optional[Any] =self.get_dummy_inputs(__A , pil_image=__A )
SCREAMING_SNAKE_CASE_ : Any =pipe(
**__A , return_dict=__A , )[0]
SCREAMING_SNAKE_CASE_ : Optional[Any] =image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ : Optional[Any] =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE_ : Dict =np.array(
[
0.9_997,
0.0_002,
0.9_997,
0.9_997,
0.9_969,
0.0_023,
0.9_997,
0.9_969,
0.9_970,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ : Optional[int] ='''cpu'''
SCREAMING_SNAKE_CASE_ : List[str] =self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : Optional[Any] =self.pipeline_class(**__A )
SCREAMING_SNAKE_CASE_ : List[Any] =pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
SCREAMING_SNAKE_CASE_ : int =self.get_dummy_inputs(__A , pil_image=__A )
SCREAMING_SNAKE_CASE_ : Optional[Any] =pipe(**__A )
SCREAMING_SNAKE_CASE_ : str =output.images
SCREAMING_SNAKE_CASE_ : List[str] =self.get_dummy_inputs(__A , pil_image=__A )
SCREAMING_SNAKE_CASE_ : int =pipe(
**__A , return_dict=__A , )[0]
SCREAMING_SNAKE_CASE_ : List[str] =image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ : Dict =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE_ : List[str] =np.array([0.9_997, 0.0_003, 0.9_997, 0.9_997, 0.9_970, 0.0_024, 0.9_997, 0.9_971, 0.9_971] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self ) -> Any:
SCREAMING_SNAKE_CASE_ : Optional[int] ='''cpu'''
SCREAMING_SNAKE_CASE_ : List[str] =self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : str =self.pipeline_class(**__A )
SCREAMING_SNAKE_CASE_ : List[str] =pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
SCREAMING_SNAKE_CASE_ : List[str] =self.get_dummy_inputs(__A , pil_image=__A )
SCREAMING_SNAKE_CASE_ : List[str] =[
pipeline_inputs['''image'''],
pipeline_inputs['''image'''],
]
SCREAMING_SNAKE_CASE_ : int =pipe(**__A )
SCREAMING_SNAKE_CASE_ : str =output.images
SCREAMING_SNAKE_CASE_ : Tuple =self.get_dummy_inputs(__A , pil_image=__A )
SCREAMING_SNAKE_CASE_ : Dict =[
tuple_pipeline_inputs['''image'''],
tuple_pipeline_inputs['''image'''],
]
SCREAMING_SNAKE_CASE_ : Any =pipe(
**__A , return_dict=__A , )[0]
SCREAMING_SNAKE_CASE_ : Any =image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ : Any =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
SCREAMING_SNAKE_CASE_ : int =np.array(
[
0.9_997,
0.9_989,
0.0_008,
0.0_021,
0.9_960,
0.0_018,
0.0_014,
0.0_002,
0.9_933,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ : List[str] =torch.device('''cpu''' )
class lowercase_ :
__lowerCamelCase = 1
SCREAMING_SNAKE_CASE_ : List[Any] =self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : Any =self.pipeline_class(**__A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
SCREAMING_SNAKE_CASE_ : Optional[Any] =torch.Generator(device=__A ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ : int =pipe.decoder.dtype
SCREAMING_SNAKE_CASE_ : Union[str, Any] =1
SCREAMING_SNAKE_CASE_ : Any =(
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
SCREAMING_SNAKE_CASE_ : Dict =pipe.prepare_latents(
__A , dtype=__A , device=__A , generator=__A , latents=__A , scheduler=DummyScheduler() )
SCREAMING_SNAKE_CASE_ : Dict =(
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
SCREAMING_SNAKE_CASE_ : Union[str, Any] =pipe.prepare_latents(
__A , dtype=__A , device=__A , generator=__A , latents=__A , scheduler=DummyScheduler() )
SCREAMING_SNAKE_CASE_ : Dict =self.get_dummy_inputs(__A , pil_image=__A )
SCREAMING_SNAKE_CASE_ : Optional[int] =pipe(
**__A , decoder_latents=__A , super_res_latents=__A ).images
SCREAMING_SNAKE_CASE_ : Union[str, Any] =self.get_dummy_inputs(__A , pil_image=__A )
# Don't pass image, instead pass embedding
SCREAMING_SNAKE_CASE_ : Optional[int] =pipeline_inputs.pop('''image''' )
SCREAMING_SNAKE_CASE_ : Optional[int] =pipe.image_encoder(__A ).image_embeds
SCREAMING_SNAKE_CASE_ : Union[str, Any] =pipe(
**__A , decoder_latents=__A , super_res_latents=__A , image_embeddings=__A , ).images
# make sure passing text embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1e-4
@skip_mps
def _snake_case ( self ) -> Dict:
SCREAMING_SNAKE_CASE_ : Dict =torch_device == '''cpu'''
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
SCREAMING_SNAKE_CASE_ : str =1e-2
self._test_attention_slicing_forward_pass(
test_max_difference=__A , expected_max_diff=__A )
@skip_mps
def _snake_case ( self ) -> int:
SCREAMING_SNAKE_CASE_ : Optional[Any] =torch_device == '''cpu'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] =True
SCREAMING_SNAKE_CASE_ : Any =[
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
self._test_inference_batch_single_identical(
test_max_difference=__A , relax_max_difference=__A , additional_params_copy_to_batched_inputs=__A , )
def _snake_case ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ : int =[
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
SCREAMING_SNAKE_CASE_ : Optional[int] =[2, 3]
self._test_inference_batch_consistent(
batch_sizes=__A , additional_params_copy_to_batched_inputs=__A , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=__A )
@skip_mps
def _snake_case ( self ) -> Tuple:
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _snake_case ( self ) -> Optional[Any]:
return super().test_save_load_local()
@skip_mps
def _snake_case ( self ) -> str:
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class lowercase_ ( unittest.TestCase ):
def _snake_case ( self ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ) -> Any:
SCREAMING_SNAKE_CASE_ : Optional[int] =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png''' )
SCREAMING_SNAKE_CASE_ : Optional[int] =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/unclip/karlo_v1_alpha_cat_variation_fp16.npy''' )
SCREAMING_SNAKE_CASE_ : Optional[Any] =UnCLIPImageVariationPipeline.from_pretrained(
'''kakaobrain/karlo-v1-alpha-image-variations''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE_ : List[str] =pipeline.to(__A )
pipeline.set_progress_bar_config(disable=__A )
SCREAMING_SNAKE_CASE_ : str =torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ : List[str] =pipeline(
__A , generator=__A , output_type='''np''' , )
SCREAMING_SNAKE_CASE_ : str =output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(__A , __A , 15 )
| 431 |
import copy
import re
class lowercase_ :
__lowerCamelCase = "hp"
__lowerCamelCase = {}
__lowerCamelCase = None
@classmethod
def _snake_case ( cls , __A , __A ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ : Dict =prefix
SCREAMING_SNAKE_CASE_ : Optional[Any] =defaults
cls.build_naming_info()
@staticmethod
def _snake_case ( __A , __A ) -> str:
if len(__A ) == 0:
return ""
SCREAMING_SNAKE_CASE_ : Any =None
if any(char.isdigit() for char in word ):
raise Exception(F'Parameters should not contain numbers: \'{word}\' contains a number' )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(__A ) + 1 ):
SCREAMING_SNAKE_CASE_ : List[str] =word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
SCREAMING_SNAKE_CASE_ : int =prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(__A ):
SCREAMING_SNAKE_CASE_ : Optional[Any] =''''''
while integer != 0:
SCREAMING_SNAKE_CASE_ : Union[str, Any] =chr(ord('''A''' ) + integer % 10 ) + s
integer //= 10
return s
SCREAMING_SNAKE_CASE_ : Dict =0
while True:
SCREAMING_SNAKE_CASE_ : Optional[int] =word + '''#''' + int_to_alphabetic(__A )
if sword in info["reverse_short_word"]:
continue
else:
SCREAMING_SNAKE_CASE_ : Tuple =sword
break
SCREAMING_SNAKE_CASE_ : List[str] =short_word
SCREAMING_SNAKE_CASE_ : str =word
return short_word
@staticmethod
def _snake_case ( __A , __A ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ : str =param_name.split('''_''' )
SCREAMING_SNAKE_CASE_ : str =[TrialShortNamer.shortname_for_word(__A , __A ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
SCREAMING_SNAKE_CASE_ : Tuple =['''''', '''_''']
for separator in separators:
SCREAMING_SNAKE_CASE_ : Dict =separator.join(__A )
if shortname not in info["reverse_short_param"]:
SCREAMING_SNAKE_CASE_ : List[str] =shortname
SCREAMING_SNAKE_CASE_ : Any =param_name
return shortname
return param_name
@staticmethod
def _snake_case ( __A , __A ) -> int:
SCREAMING_SNAKE_CASE_ : Union[str, Any] =TrialShortNamer.shortname_for_key(__A , __A )
SCREAMING_SNAKE_CASE_ : Any =short_name
SCREAMING_SNAKE_CASE_ : Dict =param_name
@classmethod
def _snake_case ( cls ) -> Optional[int]:
if cls.NAMING_INFO is not None:
return
SCREAMING_SNAKE_CASE_ : Optional[Any] ={
'''short_word''': {},
'''reverse_short_word''': {},
'''short_param''': {},
'''reverse_short_param''': {},
}
SCREAMING_SNAKE_CASE_ : Tuple =list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(__A , __A )
SCREAMING_SNAKE_CASE_ : str =info
@classmethod
def _snake_case ( cls , __A ) -> List[str]:
cls.build_naming_info()
assert cls.PREFIX is not None
SCREAMING_SNAKE_CASE_ : int =[copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(F'You should provide a default value for the param name {k} with value {v}' )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
SCREAMING_SNAKE_CASE_ : Optional[int] =cls.NAMING_INFO['''short_param'''][k]
if isinstance(__A , __A ):
SCREAMING_SNAKE_CASE_ : List[Any] =1 if v else 0
SCREAMING_SNAKE_CASE_ : List[Any] ='''''' if isinstance(__A , (int, float) ) else '''-'''
SCREAMING_SNAKE_CASE_ : Optional[Any] =F'{key}{sep}{v}'
name.append(__A )
return "_".join(__A )
@classmethod
def _snake_case ( cls , __A ) -> Dict:
SCREAMING_SNAKE_CASE_ : Optional[int] =repr[len(cls.PREFIX ) + 1 :]
if repr == "":
SCREAMING_SNAKE_CASE_ : Union[str, Any] =[]
else:
SCREAMING_SNAKE_CASE_ : Tuple =repr.split('''_''' )
SCREAMING_SNAKE_CASE_ : List[Any] ={}
for value in values:
if "-" in value:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] =value.split('''-''' )
else:
SCREAMING_SNAKE_CASE_ : Any =re.sub('''[0-9.]''' , '''''' , __A )
SCREAMING_SNAKE_CASE_ : Optional[Any] =float(re.sub('''[^0-9.]''' , '''''' , __A ) )
SCREAMING_SNAKE_CASE_ : List[Any] =cls.NAMING_INFO['''reverse_short_param'''][p_k]
SCREAMING_SNAKE_CASE_ : Optional[Any] =p_v
for k in cls.DEFAULTS:
if k not in parameters:
SCREAMING_SNAKE_CASE_ : Tuple =cls.DEFAULTS[k]
return parameters
| 431 | 1 |
"""simple docstring"""
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"""facebook/data2vec-base-960h""": """https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json""",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class UpperCamelCase__ ( _lowerCAmelCase ):
"""simple docstring"""
A__ : List[str] = "data2vec-audio"
def __init__( self , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__=768 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=3072 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.0_2 , SCREAMING_SNAKE_CASE__=1e-5 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=(512, 512, 512, 512, 512, 512, 512) , SCREAMING_SNAKE_CASE__=(5, 2, 2, 2, 2, 2, 2) , SCREAMING_SNAKE_CASE__=(10, 3, 3, 3, 3, 2, 2) , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=19 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=0.0_5 , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=0 , SCREAMING_SNAKE_CASE__="sum" , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=256 , SCREAMING_SNAKE_CASE__=(512, 512, 512, 512, 1500) , SCREAMING_SNAKE_CASE__=(5, 3, 3, 1, 1) , SCREAMING_SNAKE_CASE__=(1, 2, 3, 1, 1) , SCREAMING_SNAKE_CASE__=512 , SCREAMING_SNAKE_CASE__=0 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=None , **SCREAMING_SNAKE_CASE__ , ) -> Dict:
super().__init__(**SCREAMING_SNAKE_CASE__ , pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ )
A__ = hidden_size
A__ = feat_extract_activation
A__ = list(SCREAMING_SNAKE_CASE__ )
A__ = list(SCREAMING_SNAKE_CASE__ )
A__ = list(SCREAMING_SNAKE_CASE__ )
A__ = conv_bias
A__ = num_conv_pos_embeddings
A__ = num_conv_pos_embedding_groups
A__ = conv_pos_kernel_size
A__ = len(self.conv_dim )
A__ = num_hidden_layers
A__ = intermediate_size
A__ = hidden_act
A__ = num_attention_heads
A__ = hidden_dropout
A__ = attention_dropout
A__ = activation_dropout
A__ = feat_proj_dropout
A__ = final_dropout
A__ = layerdrop
A__ = layer_norm_eps
A__ = initializer_range
A__ = vocab_size
A__ = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
A__ = mask_time_prob
A__ = mask_time_length
A__ = mask_time_min_masks
A__ = mask_feature_prob
A__ = mask_feature_length
A__ = mask_feature_min_masks
# ctc loss
A__ = ctc_loss_reduction
A__ = ctc_zero_infinity
# adapter
A__ = add_adapter
A__ = adapter_kernel_size
A__ = adapter_stride
A__ = num_adapter_layers
A__ = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
A__ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
A__ = list(SCREAMING_SNAKE_CASE__ )
A__ = list(SCREAMING_SNAKE_CASE__ )
A__ = list(SCREAMING_SNAKE_CASE__ )
A__ = xvector_output_dim
@property
def snake_case__ ( self ) -> List[str]:
return math.prod(self.conv_stride )
| 104 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self ) -> Any:
A__ = tempfile.mkdtemp()
A__ = BlipImageProcessor()
A__ = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
A__ = BlipaProcessor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(self.tmpdirname )
def snake_case__ ( self , **SCREAMING_SNAKE_CASE__ ) -> Dict:
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ).tokenizer
def snake_case__ ( self , **SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ).image_processor
def snake_case__ ( self ) -> str:
shutil.rmtree(self.tmpdirname )
def snake_case__ ( self ) -> List[str]:
A__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
A__ = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def snake_case__ ( self ) -> str:
A__ = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A__ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
A__ = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 )
A__ = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE__ )
def snake_case__ ( self ) -> int:
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = BlipaProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
A__ = self.prepare_image_inputs()
A__ = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors="np" )
A__ = processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def snake_case__ ( self ) -> Optional[int]:
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = BlipaProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
A__ = "lower newer"
A__ = processor(text=SCREAMING_SNAKE_CASE__ )
A__ = tokenizer(SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case__ ( self ) -> Tuple:
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = BlipaProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
A__ = "lower newer"
A__ = self.prepare_image_inputs()
A__ = processor(text=SCREAMING_SNAKE_CASE__ , images=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
processor()
def snake_case__ ( self ) -> List[Any]:
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = BlipaProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
A__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A__ = processor.batch_decode(SCREAMING_SNAKE_CASE__ )
A__ = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def snake_case__ ( self ) -> Tuple:
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = BlipaProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
A__ = "lower newer"
A__ = self.prepare_image_inputs()
A__ = processor(text=SCREAMING_SNAKE_CASE__ , images=SCREAMING_SNAKE_CASE__ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
| 104 | 1 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
_SCREAMING_SNAKE_CASE = datasets.utils.logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = ['names', 'prefix']
_SCREAMING_SNAKE_CASE = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols']
_SCREAMING_SNAKE_CASE = ['encoding_errors', 'on_bad_lines']
_SCREAMING_SNAKE_CASE = ['date_format']
@dataclass
class a ( datasets.BuilderConfig ):
"""simple docstring"""
lowerCamelCase :str = ","
lowerCamelCase :Optional[str] = None
lowerCamelCase :Optional[Union[int, List[int], str]] = "infer"
lowerCamelCase :Optional[List[str]] = None
lowerCamelCase :Optional[List[str]] = None
lowerCamelCase :Optional[Union[int, str, List[int], List[str]]] = None
lowerCamelCase :Optional[Union[List[int], List[str]]] = None
lowerCamelCase :Optional[str] = None
lowerCamelCase :bool = True
lowerCamelCase :Optional[Literal["c", "python", "pyarrow"]] = None
lowerCamelCase :Dict[Union[int, str], Callable[[Any], Any]] = None
lowerCamelCase :Optional[list] = None
lowerCamelCase :Optional[list] = None
lowerCamelCase :bool = False
lowerCamelCase :Optional[Union[int, List[int]]] = None
lowerCamelCase :Optional[int] = None
lowerCamelCase :Optional[Union[str, List[str]]] = None
lowerCamelCase :bool = True
lowerCamelCase :bool = True
lowerCamelCase :bool = False
lowerCamelCase :bool = True
lowerCamelCase :Optional[str] = None
lowerCamelCase :str = "."
lowerCamelCase :Optional[str] = None
lowerCamelCase :str = '"'
lowerCamelCase :int = 0
lowerCamelCase :Optional[str] = None
lowerCamelCase :Optional[str] = None
lowerCamelCase :Optional[str] = None
lowerCamelCase :Optional[str] = None
lowerCamelCase :bool = True
lowerCamelCase :bool = True
lowerCamelCase :int = 0
lowerCamelCase :bool = True
lowerCamelCase :bool = False
lowerCamelCase :Optional[str] = None
lowerCamelCase :int = 10000
lowerCamelCase :Optional[datasets.Features] = None
lowerCamelCase :Optional[str] = "strict"
lowerCamelCase :Literal["error", "warn", "skip"] = "error"
lowerCamelCase :Optional[str] = None
def UpperCAmelCase ( self ) -> List[str]:
if self.delimiter is not None:
_A = self.delimiter
if self.column_names is not None:
_A = self.column_names
@property
def UpperCAmelCase ( self ) -> Any:
_A = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , __a ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class a ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
lowerCamelCase :Optional[Any] = CsvConfig
def UpperCAmelCase ( self ) -> Any:
return datasets.DatasetInfo(features=self.config.features )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> List[Any]:
if not self.config.data_files:
raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
_A = dl_manager.download_and_extract(self.config.data_files )
if isinstance(__a , (str, list, tuple) ):
_A = data_files
if isinstance(__a , __a ):
_A = [files]
_A = [dl_manager.iter_files(__a ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
_A = []
for split_name, files in data_files.items():
if isinstance(__a , __a ):
_A = [files]
_A = [dl_manager.iter_files(__a ) for file in files]
splits.append(datasets.SplitGenerator(name=__a , gen_kwargs={"""files""": files} ) )
return splits
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> pa.Table:
if self.config.features is not None:
_A = self.config.features.arrow_schema
if all(not require_storage_cast(__a ) for feature in self.config.features.values() ):
# cheaper cast
_A = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=__a )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
_A = table_cast(__a , __a )
return pa_table
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str:
_A = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
_A = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(__a ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(__a ) ):
_A = pd.read_csv(__a , iterator=__a , dtype=__a , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(__a ):
_A = pa.Table.from_pandas(__a )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(__a )
except ValueError as e:
logger.error(F'''Failed to read file \'{file}\' with error {type(__a )}: {e}''' )
raise
| 707 | import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class a :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=sys.maxsize ) -> str:
_A = """bilinear"""
_A = max_size
_A = short_edge_length
def __call__( self , lowerCAmelCase_ ) -> Optional[Any]:
_A = []
for img in imgs:
_A , _A = img.shape[:2]
# later: provide list and randomly choose index for resize
_A = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
_A = size * 1.0 / min(lowerCAmelCase_ , lowerCAmelCase_ )
if h < w:
_A , _A = size, scale * w
else:
_A , _A = scale * h, size
if max(lowerCAmelCase_ , lowerCAmelCase_ ) > self.max_size:
_A = self.max_size * 1.0 / max(lowerCAmelCase_ , lowerCAmelCase_ )
_A = newh * scale
_A = neww * scale
_A = int(neww + 0.5 )
_A = int(newh + 0.5 )
if img.dtype == np.uinta:
_A = Image.fromarray(lowerCAmelCase_ )
_A = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
_A = np.asarray(lowerCAmelCase_ )
else:
_A = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw
_A = nn.functional.interpolate(
lowerCAmelCase_ , (newh, neww) , mode=self.interp_method , align_corners=lowerCAmelCase_ ).squeeze(0 )
img_augs.append(lowerCAmelCase_ )
return img_augs
class a :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ ) -> List[Any]:
_A = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
_A = cfg.INPUT.FORMAT
_A = cfg.SIZE_DIVISIBILITY
_A = cfg.PAD_VALUE
_A = cfg.INPUT.MAX_SIZE_TEST
_A = cfg.MODEL.DEVICE
_A = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
_A = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
_A = lambda lowerCAmelCase_ : (x - self.pixel_mean) / self.pixel_std
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Tuple:
_A = tuple(max(lowerCAmelCase_ ) for s in zip(*[img.shape for img in images] ) )
_A = [im.shape[-2:] for im in images]
_A = [
nn.functional.pad(
lowerCAmelCase_ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(lowerCAmelCase_ , lowerCAmelCase_ )
]
return torch.stack(lowerCAmelCase_ ), torch.tensor(lowerCAmelCase_ )
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_=False ) -> int:
with torch.no_grad():
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_A = [images]
if single_image:
assert len(lowerCAmelCase_ ) == 1
for i in range(len(lowerCAmelCase_ ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(lowerCAmelCase_ , images.pop(lowerCAmelCase_ ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
lowerCAmelCase_ , torch.as_tensor(img_tensorize(images.pop(lowerCAmelCase_ ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
_A = torch.tensor([im.shape[:2] for im in images] )
_A = self.aug(lowerCAmelCase_ )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
_A = [self.normalizer(lowerCAmelCase_ ) for x in images]
# now pad them to do the following operations
_A , _A = self.pad(lowerCAmelCase_ )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
_A = torch.true_divide(lowerCAmelCase_ , lowerCAmelCase_ )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def snake_case ( snake_case__ :Optional[int] , snake_case__ :Optional[Any]) -> Tuple:
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def snake_case ( snake_case__ :Optional[int] , snake_case__ :Tuple[int, int]) -> Optional[Any]:
assert torch.isfinite(snake_case__).all(), "Box tensor contains infinite or NaN!"
_A , _A = box_size
tensor[:, 0].clamp_(min=0 , max=snake_case__)
tensor[:, 1].clamp_(min=0 , max=snake_case__)
tensor[:, 2].clamp_(min=0 , max=snake_case__)
tensor[:, 3].clamp_(min=0 , max=snake_case__)
| 83 | 0 |
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
# Return True if there is node that has not iterated.
_lowercase : List[str] = [False] * len(SCREAMING_SNAKE_CASE )
_lowercase : Union[str, Any] = []
queue.append(SCREAMING_SNAKE_CASE )
_lowercase : List[Any] = True
while queue:
_lowercase : Tuple = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(SCREAMING_SNAKE_CASE )
_lowercase : List[Any] = True
_lowercase : Tuple = u
return visited[t]
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
# This array is filled by BFS and to store path
_lowercase : int = [-1] * (len(SCREAMING_SNAKE_CASE ))
_lowercase : Any = 0
while bfs(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
_lowercase : str = float('Inf' )
_lowercase : str = sink
while s != source:
# Find the minimum value in select path
_lowercase : Union[str, Any] = min(SCREAMING_SNAKE_CASE , graph[parent[s]][s] )
_lowercase : List[Any] = parent[s]
max_flow += path_flow
_lowercase : List[str] = sink
while v != source:
_lowercase : Tuple = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
_lowercase : Tuple = parent[v]
return max_flow
UpperCamelCase = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
UpperCamelCase = 0, 5
print(ford_fulkerson(graph, source, sink))
| 66 |
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
__magic_name__ : str = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowercase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __init__( self , _A , _A , _A , _A , _A , _A , _A , _A , _A , ):
'''simple docstring'''
super().__init__()
if safety_checker is None:
logger.warning(
f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
self.register_modules(
speech_model=_A , speech_processor=_A , vae=_A , text_encoder=_A , tokenizer=_A , unet=_A , scheduler=_A , feature_extractor=_A , )
def _a ( self , _A = "auto" ):
'''simple docstring'''
if slice_size == "auto":
UpperCamelCase : List[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_A )
def _a ( self ):
'''simple docstring'''
self.enable_attention_slicing(_A )
@torch.no_grad()
def __call__( self , _A , _A=1_6_0_0_0 , _A = 5_1_2 , _A = 5_1_2 , _A = 5_0 , _A = 7.5 , _A = None , _A = 1 , _A = 0.0 , _A = None , _A = None , _A = "pil" , _A = True , _A = None , _A = 1 , **_A , ):
'''simple docstring'''
UpperCamelCase : str = self.speech_processor.feature_extractor(
_A , return_tensors="""pt""" , sampling_rate=_A ).input_features.to(self.device )
UpperCamelCase : List[Any] = self.speech_model.generate(_A , max_length=4_8_0_0_0_0 )
UpperCamelCase : Optional[int] = self.speech_processor.tokenizer.batch_decode(_A , skip_special_tokens=_A , normalize=_A )[
0
]
if isinstance(_A , _A ):
UpperCamelCase : Tuple = 1
elif isinstance(_A , _A ):
UpperCamelCase : List[Any] = len(_A )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(_A )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_A , _A ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(_A )}.""" )
# get prompt text embeddings
UpperCamelCase : Dict = self.tokenizer(
_A , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
UpperCamelCase : Dict = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCamelCase : Tuple = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
UpperCamelCase : int = text_input_ids[:, : self.tokenizer.model_max_length]
UpperCamelCase : List[str] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[Any] = text_embeddings.shape
UpperCamelCase : Optional[int] = text_embeddings.repeat(1 , _A , 1 )
UpperCamelCase : Union[str, Any] = text_embeddings.view(bs_embed * num_images_per_prompt , _A , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCamelCase : Optional[Any] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCamelCase : List[str]
if negative_prompt is None:
UpperCamelCase : str = [""""""] * batch_size
elif type(_A ) is not type(_A ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(_A )} !="""
f""" {type(_A )}.""" )
elif isinstance(_A , _A ):
UpperCamelCase : Tuple = [negative_prompt]
elif batch_size != len(_A ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(_A )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
""" the batch size of `prompt`.""" )
else:
UpperCamelCase : Any = negative_prompt
UpperCamelCase : Optional[int] = text_input_ids.shape[-1]
UpperCamelCase : List[str] = self.tokenizer(
_A , padding="""max_length""" , max_length=_A , truncation=_A , return_tensors="""pt""" , )
UpperCamelCase : Optional[int] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCamelCase : List[Any] = uncond_embeddings.shape[1]
UpperCamelCase : Dict = uncond_embeddings.repeat(1 , _A , 1 )
UpperCamelCase : Dict = uncond_embeddings.view(batch_size * num_images_per_prompt , _A , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase : str = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
UpperCamelCase : Any = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
UpperCamelCase : int = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
UpperCamelCase : Tuple = torch.randn(_A , generator=_A , device="""cpu""" , dtype=_A ).to(
self.device )
else:
UpperCamelCase : Any = torch.randn(_A , generator=_A , device=self.device , dtype=_A )
else:
if latents.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
UpperCamelCase : Optional[Any] = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(_A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
UpperCamelCase : Tuple = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase : Any = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCamelCase : List[Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase : str = {}
if accepts_eta:
UpperCamelCase : Union[str, Any] = eta
for i, t in enumerate(self.progress_bar(_A ) ):
# expand the latents if we are doing classifier free guidance
UpperCamelCase : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCamelCase : str = self.scheduler.scale_model_input(_A , _A )
# predict the noise residual
UpperCamelCase : Optional[Any] = self.unet(_A , _A , encoder_hidden_states=_A ).sample
# perform guidance
if do_classifier_free_guidance:
UpperCamelCase , UpperCamelCase : str = noise_pred.chunk(2 )
UpperCamelCase : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase : Any = self.scheduler.step(_A , _A , _A , **_A ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_A , _A , _A )
UpperCamelCase : Optional[Any] = 1 / 0.1_82_15 * latents
UpperCamelCase : Union[str, Any] = self.vae.decode(_A ).sample
UpperCamelCase : List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCamelCase : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCamelCase : Optional[Any] = self.numpy_to_pil(_A )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=_A , nsfw_content_detected=_A )
| 102 | 0 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class UpperCamelCase :
a__ :Any = 42
a__ :List[Any] = None
a__ :List[Any] = None
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : TreeNode | None ):
'''simple docstring'''
def is_valid_tree(_SCREAMING_SNAKE_CASE : TreeNode | None ) -> bool:
if node is None:
return True
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
return False
try:
float(node.data )
except (TypeError, ValueError):
return False
return is_valid_tree(node.left ) and is_valid_tree(node.right )
if not is_valid_tree(_UpperCamelCase ):
raise ValueError(
"""Each node should be type of TreeNode and data should be float.""" )
def is_binary_search_tree_recursive_check(
_SCREAMING_SNAKE_CASE : TreeNode | None , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float ) -> bool:
if node is None:
return True
return (
left_bound < node.data < right_bound
and is_binary_search_tree_recursive_check(node.left , _UpperCamelCase , node.data )
and is_binary_search_tree_recursive_check(
node.right , node.data , _UpperCamelCase )
)
return is_binary_search_tree_recursive_check(_UpperCamelCase , -float("""inf""" ) , float("""inf""" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 721 | SCREAMING_SNAKE_CASE : List[Any] = [
"Audio",
"Array2D",
"Array3D",
"Array4D",
"Array5D",
"ClassLabel",
"Features",
"Sequence",
"Value",
"Image",
"Translation",
"TranslationVariableLanguages",
]
from .audio import Audio
from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 138 | 0 |
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class a_ ( a_ ):
'''simple docstring'''
__a: Dict = ComputeEnvironment.AMAZON_SAGEMAKER
__a: str = True
__a: Union[str, Any] = '''ml.p3.2xlarge'''
__a: Any = '''accelerate_sagemaker_execution_role'''
__a: Any = '''hf-sm'''
__a: int = '''us-east-1'''
__a: Tuple = 1
__a: Tuple = '''accelerate-sagemaker-1'''
__a: Optional[Any] = '''1.6'''
__a: Optional[int] = '''4.4'''
__a: Optional[Any] = '''train.py'''
__a: List[str] = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''False''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
__a: Union[str, Any] = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''--do_test''',
'''False''',
'''--do_predict''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
class a_ ( unittest.TestCase ):
'''simple docstring'''
def _lowercase ( self ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
assert isinstance(converted_args['model_name_or_path'] , lowercase_ )
assert isinstance(converted_args['do_train'] , lowercase_ )
assert isinstance(converted_args['epochs'] , lowercase_ )
assert isinstance(converted_args['learning_rate'] , lowercase_ )
assert isinstance(converted_args['max_steps'] , lowercase_ )
with pytest.raises(lowercase_ ):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
| 318 |
def lowerCamelCase ( a_ ) -> list:
lowerCAmelCase_ = len(a_ )
for _ in range(a_ ):
for i in range(_ % 2 , arr_size - 1 , 2 ):
if arr[i + 1] < arr[i]:
lowerCAmelCase_ , lowerCAmelCase_ = arr[i + 1], arr[i]
return arr
if __name__ == "__main__":
lowerCamelCase_ = list(range(1_0, 0, -1))
print(f'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''')
| 318 | 1 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def A__ ( self ):
UpperCAmelCase__ = FlaxXLMRobertaModel.from_pretrained("""xlm-roberta-base""" )
UpperCAmelCase__ = AutoTokenizer.from_pretrained("""xlm-roberta-base""" )
UpperCAmelCase__ = """The dog is cute and lives in the garden house"""
UpperCAmelCase__ = jnp.array([tokenizer.encode(__lowercase )] )
UpperCAmelCase__ = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase__ = jnp.array(
[[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] )
UpperCAmelCase__ = model(__lowercase )["""last_hidden_state"""]
self.assertEqual(output.shape , __lowercase )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , __lowercase , atol=1e-3 ) )
| 422 |
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def snake_case__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) ->str:
UpperCAmelCase__ = OmegaConf.load(_SCREAMING_SNAKE_CASE )
if display:
print(yaml.dump(OmegaConf.to_container(_SCREAMING_SNAKE_CASE ) ) )
return config
def snake_case__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) ->List[Any]:
if conf_path is None:
UpperCAmelCase__ = """./model_checkpoints/vqgan_only.yaml"""
UpperCAmelCase__ = load_config(_SCREAMING_SNAKE_CASE , display=_SCREAMING_SNAKE_CASE )
UpperCAmelCase__ = VQModel(**config.model.params )
if ckpt_path is None:
UpperCAmelCase__ = """./model_checkpoints/vqgan_only.pt"""
UpperCAmelCase__ = torch.load(_SCREAMING_SNAKE_CASE , map_location=_SCREAMING_SNAKE_CASE )
if ".ckpt" in ckpt_path:
UpperCAmelCase__ = sd["""state_dict"""]
model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
del sd
return model
def snake_case__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = model.encode(_SCREAMING_SNAKE_CASE )
print(F'''VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}''' )
UpperCAmelCase__ = model.decode(_SCREAMING_SNAKE_CASE )
return xrec
def snake_case__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) ->int:
UpperCAmelCase__ , UpperCAmelCase__ = string.rsplit(""".""" , 1 )
if reload:
UpperCAmelCase__ = importlib.import_module(_SCREAMING_SNAKE_CASE )
importlib.reload(_SCREAMING_SNAKE_CASE )
return getattr(importlib.import_module(_SCREAMING_SNAKE_CASE , package=_SCREAMING_SNAKE_CASE ) , cls )
def snake_case__ ( _SCREAMING_SNAKE_CASE ) ->str:
if "target" not in config:
raise KeyError("""Expected key `target` to instantiate.""" )
return get_obj_from_str(config["""target"""] )(**config.get("""params""" , {} ) )
def snake_case__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True ) ->str:
UpperCAmelCase__ = instantiate_from_config(_SCREAMING_SNAKE_CASE )
if sd is not None:
model.load_state_dict(_SCREAMING_SNAKE_CASE )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def snake_case__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[str]:
# load the specified checkpoint
if ckpt:
UpperCAmelCase__ = torch.load(_SCREAMING_SNAKE_CASE , map_location="""cpu""" )
UpperCAmelCase__ = pl_sd["""global_step"""]
print(F'''loaded model from global step {global_step}.''' )
else:
UpperCAmelCase__ = {"""state_dict""": None}
UpperCAmelCase__ = None
UpperCAmelCase__ = load_model_from_config(config.model , pl_sd["""state_dict"""] , gpu=_SCREAMING_SNAKE_CASE , eval_mode=_SCREAMING_SNAKE_CASE )["""model"""]
return model, global_step
| 422 | 1 |
'''simple docstring'''
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def a__ ( _SCREAMING_SNAKE_CASE : int ) -> Dict:
"""simple docstring"""
def is_in_circle(_SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float ) -> bool:
UpperCAmelCase_ : Dict = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
UpperCAmelCase_ : List[Any] = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(_SCREAMING_SNAKE_CASE ) )
# The ratio of the area for circle to square is pi/4.
UpperCAmelCase_ : Optional[int] = proportion * 4
print(F'''The estimated value of pi is {pi_estimate}''' )
print(F'''The numpy value of pi is {pi}''' )
print(F'''The total error is {abs(pi - pi_estimate )}''' )
def a__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Callable[[float], float] , _SCREAMING_SNAKE_CASE : float = 0.0 , _SCREAMING_SNAKE_CASE : float = 1.0 , ) -> float:
"""simple docstring"""
return mean(
function_to_integrate(uniform(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) for _ in range(_SCREAMING_SNAKE_CASE ) ) * (max_value - min_value)
def a__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : float = 0.0 , _SCREAMING_SNAKE_CASE : float = 1.0 ) -> None:
"""simple docstring"""
def identity_function(_SCREAMING_SNAKE_CASE : float ) -> float:
return x
UpperCAmelCase_ : Optional[Any] = area_under_curve_estimator(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = (max_value * max_value - min_value * min_value) / 2
print("******************" )
print(F'''Estimating area under y=x where x varies from {min_value} to {max_value}''' )
print(F'''Estimated value is {estimated_value}''' )
print(F'''Expected value is {expected_value}''' )
print(F'''Total error is {abs(estimated_value - expected_value )}''' )
print("******************" )
def a__ ( _SCREAMING_SNAKE_CASE : int ) -> None:
"""simple docstring"""
def function_to_integrate(_SCREAMING_SNAKE_CASE : float ) -> float:
return sqrt(4.0 - x * x )
UpperCAmelCase_ : List[Any] = area_under_curve_estimator(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 0.0 , 2.0 )
print("******************" )
print("Estimating pi using area_under_curve_estimator" )
print(F'''Estimated value is {estimated_value}''' )
print(F'''Expected value is {pi}''' )
print(F'''Total error is {abs(estimated_value - pi )}''' )
print("******************" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 71 |
import pytest
_lowerCamelCase ="""__dummy_dataset1__"""
_lowerCamelCase ="""
import json
import os
import datasets
REPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"
URLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
\"tokens\": datasets.Sequence(datasets.Value(\"string\")),
\"ner_tags\": datasets.Sequence(
datasets.features.ClassLabel(
names=[
\"O\",
\"B-PER\",
\"I-PER\",
\"B-ORG\",
\"I-ORG\",
\"B-LOC\",
\"I-LOC\",
]
)
),
\"langs\": datasets.Sequence(datasets.Value(\"string\")),
\"spans\": datasets.Sequence(datasets.Value(\"string\")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),
]
def _generate_examples(self, filepath):
with open(filepath, \"r\", encoding=\"utf-8\") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
"""
@pytest.fixture
def _a ( ):
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def _a ( ):
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
lowerCamelCase : Union[str, Any] = dataset_loading_script_name
lowerCamelCase : Dict = tmp_path / """datasets""" / script_name
script_dir.mkdir(parents=lowerCamelCase )
lowerCamelCase : str = script_dir / F'''{script_name}.py'''
with open(lowerCamelCase, """w""" ) as f:
f.write(lowerCamelCase )
return str(lowerCamelCase )
| 681 | 0 |
def SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ , UpperCamelCase__ ):
def get_matched_characters(UpperCamelCase__ , UpperCamelCase__ ) -> str:
UpperCamelCase__ : Optional[int] = []
UpperCamelCase__ : Optional[int] = min(len(_stra ) , len(_stra ) ) // 2
for i, l in enumerate(_stra ):
UpperCamelCase__ : Tuple = int(max(0 , i - limit ) )
UpperCamelCase__ : Dict = int(min(i + limit + 1 , len(_stra ) ) )
if l in _stra[left:right]:
matched.append(UpperCamelCase__ )
UpperCamelCase__ : int = f'''{_stra[0:_stra.index(UpperCamelCase__ )]} {_stra[_stra.index(UpperCamelCase__ ) + 1:]}'''
return "".join(UpperCamelCase__ )
# matching characters
UpperCamelCase__ : int = get_matched_characters(UpperCamelCase__ , UpperCamelCase__ )
UpperCamelCase__ : Union[str, Any] = get_matched_characters(UpperCamelCase__ , UpperCamelCase__ )
UpperCamelCase__ : List[str] = len(UpperCamelCase__ )
# transposition
UpperCamelCase__ : Tuple = (
len([(ca, ca) for ca, ca in zip(UpperCamelCase__ , UpperCamelCase__ ) if ca != ca] ) // 2
)
if not match_count:
UpperCamelCase__ : int = 0.0
else:
UpperCamelCase__ : int = (
1
/ 3
* (
match_count / len(UpperCamelCase__ )
+ match_count / len(UpperCamelCase__ )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
UpperCamelCase__ : Optional[int] = 0
for ca, ca in zip(stra[:4] , stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("hello", "world"))
| 462 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase ={
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase =["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase =[
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase =[
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase =[
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
lowerCamelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 462 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class a__:
a_ : Dict = LEDConfig
a_ : Optional[int] = {}
a_ : int = "gelu"
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=20 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=0 , _UpperCAmelCase=4 , ) -> int:
snake_case__ =parent
snake_case__ =batch_size
snake_case__ =seq_length
snake_case__ =is_training
snake_case__ =use_labels
snake_case__ =vocab_size
snake_case__ =hidden_size
snake_case__ =num_hidden_layers
snake_case__ =num_attention_heads
snake_case__ =intermediate_size
snake_case__ =hidden_dropout_prob
snake_case__ =attention_probs_dropout_prob
snake_case__ =max_position_embeddings
snake_case__ =eos_token_id
snake_case__ =pad_token_id
snake_case__ =bos_token_id
snake_case__ =attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
snake_case__ =self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
snake_case__ =(
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def _lowercase ( self ) -> Tuple:
snake_case__ =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
snake_case__ =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
snake_case__ =tf.concat([input_ids, eos_tensor] , axis=1 )
snake_case__ =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ =self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
snake_case__ =prepare_led_inputs_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
snake_case__ =tf.concat(
[tf.zeros_like(_UpperCAmelCase )[:, :-1], tf.ones_like(_UpperCAmelCase )[:, -1:]] , axis=-1 , )
snake_case__ =global_attention_mask
return config, inputs_dict
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple:
snake_case__ =TFLEDModel(config=_UpperCAmelCase ).get_decoder()
snake_case__ =inputs_dict['''input_ids''']
snake_case__ =input_ids[:1, :]
snake_case__ =inputs_dict['''attention_mask'''][:1, :]
snake_case__ =1
# first forward pass
snake_case__ =model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , use_cache=_UpperCAmelCase )
snake_case__ =outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
snake_case__ =ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case__ =tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
snake_case__ =tf.concat([input_ids, next_tokens] , axis=-1 )
snake_case__ =tf.concat([attention_mask, next_attn_mask] , axis=-1 )
snake_case__ =model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0]
snake_case__ =model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
snake_case__ =int(ids_tensor((1,) , output_from_past.shape[-1] ) )
snake_case__ =output_from_no_past[:, -3:, random_slice_idx]
snake_case__ =output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_UpperCAmelCase , _UpperCAmelCase , rtol=1E-3 )
def a ( UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : str=None , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : Optional[Any]=None , ) -> str:
if attention_mask is None:
snake_case__ =tf.cast(tf.math.not_equal(_lowercase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
snake_case__ =tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
snake_case__ =tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
snake_case__ =tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class a__( _lowercase , _lowercase , unittest.TestCase ):
a_ : str = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
a_ : str = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
a_ : Union[str, Any] = (
{
"conversational": TFLEDForConditionalGeneration,
"feature-extraction": TFLEDModel,
"summarization": TFLEDForConditionalGeneration,
"text2text-generation": TFLEDForConditionalGeneration,
"translation": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
a_ : List[Any] = True
a_ : Optional[Any] = False
a_ : List[str] = False
a_ : Dict = False
def _lowercase ( self ) -> List[str]:
snake_case__ =TFLEDModelTester(self )
snake_case__ =ConfigTester(self , config_class=_UpperCAmelCase )
def _lowercase ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _lowercase ( self ) -> Tuple:
snake_case__ =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_UpperCAmelCase )
def _lowercase ( self ) -> str:
snake_case__ =self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ =tf.zeros_like(inputs_dict['attention_mask'] )
snake_case__ =2
snake_case__ =tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['global_attention_mask'] , )
snake_case__ =True
snake_case__ =self.model_tester.seq_length
snake_case__ =self.model_tester.encoder_seq_length
def check_decoder_attentions_output(_UpperCAmelCase ):
snake_case__ =outputs.decoder_attentions
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(_UpperCAmelCase ):
snake_case__ =[t.numpy() for t in outputs.encoder_attentions]
snake_case__ =[t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
snake_case__ =True
snake_case__ =False
snake_case__ =False
snake_case__ =model_class(_UpperCAmelCase )
snake_case__ =model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
snake_case__ =len(_UpperCAmelCase )
self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
check_encoder_attentions_output(_UpperCAmelCase )
if self.is_encoder_decoder:
snake_case__ =model_class(_UpperCAmelCase )
snake_case__ =model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
check_decoder_attentions_output(_UpperCAmelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
snake_case__ =True
snake_case__ =model_class(_UpperCAmelCase )
snake_case__ =model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
check_encoder_attentions_output(_UpperCAmelCase )
# Check attention is always last and order is fine
snake_case__ =True
snake_case__ =True
snake_case__ =model_class(_UpperCAmelCase )
snake_case__ =model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCAmelCase ) )
self.assertEqual(model.config.output_hidden_states , _UpperCAmelCase )
check_encoder_attentions_output(_UpperCAmelCase )
@unittest.skip('LED keeps using potentially symbolic tensors in conditionals and breaks tracing.' )
def _lowercase ( self ) -> int:
pass
def _lowercase ( self ) -> str:
# TODO: Head-masking not yet implement
pass
def a ( UpperCamelCase_ : Optional[int] ) -> str:
return tf.constant(_lowercase , dtype=tf.intaa )
SCREAMING_SNAKE_CASE__ : Dict = 1e-4
@slow
@require_tf
class a__( unittest.TestCase ):
def _lowercase ( self ) -> Any:
snake_case__ =TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' ).led
# change to intended input here
snake_case__ =_long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
snake_case__ =_long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
snake_case__ =prepare_led_inputs_dict(model.config , _UpperCAmelCase , _UpperCAmelCase )
snake_case__ =model(**_UpperCAmelCase )[0]
snake_case__ =(1, 1024, 768)
self.assertEqual(output.shape , _UpperCAmelCase )
# change to expected output here
snake_case__ =tf.convert_to_tensor(
[[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , )
tf.debugging.assert_near(output[:, :3, :3] , _UpperCAmelCase , atol=1E-3 )
def _lowercase ( self ) -> Optional[int]:
snake_case__ =TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' )
# change to intended input here
snake_case__ =_long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
snake_case__ =_long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
snake_case__ =prepare_led_inputs_dict(model.config , _UpperCAmelCase , _UpperCAmelCase )
snake_case__ =model(**_UpperCAmelCase )[0]
snake_case__ =(1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , _UpperCAmelCase )
# change to expected output here
snake_case__ =tf.convert_to_tensor(
[[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , )
tf.debugging.assert_near(output[:, :3, :3] , _UpperCAmelCase , atol=1E-3 , rtol=1E-3 )
| 538 |
'''simple docstring'''
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def lowercase_ ( _lowercase , _lowercase=False ) -> Dict:
'''simple docstring'''
lowerCamelCase_ : Tuple = OmegaConf.load(_lowercase )
if display:
print(yaml.dump(OmegaConf.to_container(_lowercase ) ) )
return config
def lowercase_ ( _lowercase , _lowercase=None , _lowercase=None ) -> Optional[int]:
'''simple docstring'''
if conf_path is None:
lowerCamelCase_ : int = '''./model_checkpoints/vqgan_only.yaml'''
lowerCamelCase_ : Dict = load_config(_lowercase , display=_lowercase )
lowerCamelCase_ : List[str] = VQModel(**config.model.params )
if ckpt_path is None:
lowerCamelCase_ : int = '''./model_checkpoints/vqgan_only.pt'''
lowerCamelCase_ : Union[str, Any] = torch.load(_lowercase , map_location=_lowercase )
if ".ckpt" in ckpt_path:
lowerCamelCase_ : str = sd['''state_dict''']
model.load_state_dict(_lowercase , strict=_lowercase )
model.to(_lowercase )
del sd
return model
def lowercase_ ( _lowercase , _lowercase ) -> List[str]:
'''simple docstring'''
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ : Any = model.encode(_lowercase )
print(F"""VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}""" )
lowerCamelCase_ : Any = model.decode(_lowercase )
return xrec
def lowercase_ ( _lowercase , _lowercase=False ) -> Any:
'''simple docstring'''
lowerCamelCase_, lowerCamelCase_ : Any = string.rsplit('''.''' , 1 )
if reload:
lowerCamelCase_ : int = importlib.import_module(_lowercase )
importlib.reload(_lowercase )
return getattr(importlib.import_module(_lowercase , package=_lowercase ) , cls )
def lowercase_ ( _lowercase ) -> List[str]:
'''simple docstring'''
if "target" not in config:
raise KeyError('''Expected key `target` to instantiate.''' )
return get_obj_from_str(config['''target'''] )(**config.get('''params''' , {} ) )
def lowercase_ ( _lowercase , _lowercase , _lowercase=True , _lowercase=True ) -> Any:
'''simple docstring'''
lowerCamelCase_ : int = instantiate_from_config(_lowercase )
if sd is not None:
model.load_state_dict(_lowercase )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def lowercase_ ( _lowercase , _lowercase , _lowercase , _lowercase ) -> Tuple:
'''simple docstring'''
if ckpt:
lowerCamelCase_ : List[Any] = torch.load(_lowercase , map_location='''cpu''' )
lowerCamelCase_ : int = pl_sd['''global_step''']
print(F"""loaded model from global step {global_step}.""" )
else:
lowerCamelCase_ : Optional[int] = {'''state_dict''': None}
lowerCamelCase_ : str = None
lowerCamelCase_ : Any = load_model_from_config(config.model , pl_sd['''state_dict'''] , gpu=_lowercase , eval_mode=_lowercase )['''model''']
return model, global_step
| 422 | 0 |
'''simple docstring'''
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE:Union[str, Any] = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def lowercase__ ( self , _a , _a , _a ):
"""simple docstring"""
a__ = hf_hub_download(
repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' )
a__ = VideoClassificationPipeline(model=_a , image_processor=_a , top_k=2 )
a__ = [
example_video_filepath,
'https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4',
]
return video_classifier, examples
def lowercase__ ( self , _a , _a ):
"""simple docstring"""
for example in examples:
a__ = video_classifier(_a )
self.assertEqual(
_a , [
{'score': ANY(_a ), 'label': ANY(_a )},
{'score': ANY(_a ), 'label': ANY(_a )},
] , )
@require_torch
def lowercase__ ( self ):
"""simple docstring"""
a__ = 'hf-internal-testing/tiny-random-VideoMAEForVideoClassification'
a__ = VideoMAEFeatureExtractor(
size={'shortest_edge': 10} , crop_size={'height': 10, 'width': 10} )
a__ = pipeline(
'video-classification' , model=_a , feature_extractor=_a , frame_sampling_rate=4 )
a__ = hf_hub_download(repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' )
a__ = video_classifier(_a , top_k=2 )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}] , )
a__ = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
[{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}],
[{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}],
] , )
@require_tf
def lowercase__ ( self ):
"""simple docstring"""
pass
| 721 |
'''simple docstring'''
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class _UpperCamelCase ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE:List[Any] = (KDPMaDiscreteScheduler,)
SCREAMING_SNAKE_CASE:List[Any] = 10
def lowercase__ ( self , **_a ):
"""simple docstring"""
a__ = {
'num_train_timesteps': 1100,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
}
config.update(**_a )
return config
def lowercase__ ( self ):
"""simple docstring"""
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_a )
def lowercase__ ( self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_a , beta_end=_a )
def lowercase__ ( self ):
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_a )
def lowercase__ ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_a )
def lowercase__ ( self ):
"""simple docstring"""
a__ = self.scheduler_classes[0]
a__ = self.get_scheduler_config(prediction_type='v_prediction' )
a__ = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps )
a__ = self.dummy_model()
a__ = self.dummy_sample_deter * scheduler.init_noise_sigma
a__ = sample.to(_a )
for i, t in enumerate(scheduler.timesteps ):
a__ = scheduler.scale_model_input(_a , _a )
a__ = model(_a , _a )
a__ = scheduler.step(_a , _a , _a )
a__ = output.prev_sample
a__ = torch.sum(torch.abs(_a ) )
a__ = torch.mean(torch.abs(_a ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6934e-07 ) < 1e-2
assert abs(result_mean.item() - 6.1112e-10 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.693_4286_5017_0972e-07 ) < 1e-2
assert abs(result_mean.item() - 0.0002 ) < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
if torch_device == "mps":
return
a__ = self.scheduler_classes[0]
a__ = self.get_scheduler_config()
a__ = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps )
a__ = self.dummy_model()
a__ = self.dummy_sample_deter * scheduler.init_noise_sigma
a__ = sample.to(_a )
for i, t in enumerate(scheduler.timesteps ):
a__ = scheduler.scale_model_input(_a , _a )
a__ = model(_a , _a )
a__ = scheduler.step(_a , _a , _a )
a__ = output.prev_sample
a__ = torch.sum(torch.abs(_a ) )
a__ = torch.mean(torch.abs(_a ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4125 ) < 1e-2
assert abs(result_mean.item() - 0.0266 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1e-2
assert abs(result_mean.item() - 0.0266 ) < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
if torch_device == "mps":
return
a__ = self.scheduler_classes[0]
a__ = self.get_scheduler_config()
a__ = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps , device=_a )
a__ = self.dummy_model()
a__ = self.dummy_sample_deter.to(_a ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
a__ = scheduler.scale_model_input(_a , _a )
a__ = model(_a , _a )
a__ = scheduler.step(_a , _a , _a )
a__ = output.prev_sample
a__ = torch.sum(torch.abs(_a ) )
a__ = torch.mean(torch.abs(_a ) )
if str(_a ).startswith('cpu' ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4125 ) < 1e-2
assert abs(result_mean.item() - 0.0266 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1e-2
assert abs(result_mean.item() - 0.0266 ) < 1e-3
| 126 | 0 |
"""simple docstring"""
lowercase__ = 8.3144598
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase ) -> float:
"""simple docstring"""
if temperature < 0:
raise Exception("Temperature cannot be less than 0 K" )
if molar_mass <= 0:
raise Exception("Molar mass cannot be less than or equal to 0 kg/mol" )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
lowercase__ = 300
lowercase__ = 28
lowercase__ = rms_speed_of_molecule(temperature, molar_mass)
print(F"""Vrms of Nitrogen gas at 300 K is {vrms} m/s""")
| 610 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase__ = {
"""configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""],
"""tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["""BertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["""TFBertTokenizer"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 610 | 1 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
lowerCAmelCase__: Union[str, Any] = logging.get_logger(__name__)
class snake_case_ ( _UpperCamelCase ):
__lowerCamelCase : Dict = ['input_features']
def __init__( self , __lowerCAmelCase=80 , __lowerCAmelCase=16_000 , __lowerCAmelCase=160 , __lowerCAmelCase=30 , __lowerCAmelCase=400 , __lowerCAmelCase=0.0 , __lowerCAmelCase=False , **__lowerCAmelCase , ):
super().__init__(
feature_size=__lowerCAmelCase , sampling_rate=__lowerCAmelCase , padding_value=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , **__lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ : List[str] = n_fft
SCREAMING_SNAKE_CASE_ : Any = hop_length
SCREAMING_SNAKE_CASE_ : Dict = chunk_length
SCREAMING_SNAKE_CASE_ : Union[str, Any] = chunk_length * sampling_rate
SCREAMING_SNAKE_CASE_ : Any = self.n_samples // hop_length
SCREAMING_SNAKE_CASE_ : List[Any] = sampling_rate
SCREAMING_SNAKE_CASE_ : List[str] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__lowerCAmelCase , min_frequency=0.0 , max_frequency=8_000.0 , sampling_rate=__lowerCAmelCase , norm='slaney' , mel_scale='slaney' , )
def __A ( self , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Tuple = spectrogram(
__lowerCAmelCase , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='log10' , )
SCREAMING_SNAKE_CASE_ : Tuple = log_spec[:, :-1]
SCREAMING_SNAKE_CASE_ : Any = np.maximum(__lowerCAmelCase , log_spec.max() - 8.0 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 0.0 ):
if attention_mask is not None:
SCREAMING_SNAKE_CASE_ : List[str] = np.array(__lowerCAmelCase , np.intaa )
SCREAMING_SNAKE_CASE_ : List[str] = []
for vector, length in zip(__lowerCAmelCase , attention_mask.sum(-1 ) ):
SCREAMING_SNAKE_CASE_ : List[Any] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
SCREAMING_SNAKE_CASE_ : List[Any] = padding_value
normed_input_values.append(__lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE_ : Any = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def __call__( self , __lowerCAmelCase , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = "max_length" , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , **__lowerCAmelCase , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
F' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
F' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
SCREAMING_SNAKE_CASE_ : str = isinstance(__lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'Only mono-channel audio is supported for input to {self}' )
SCREAMING_SNAKE_CASE_ : Any = is_batched_numpy or (
isinstance(__lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(__lowerCAmelCase , np.ndarray ):
SCREAMING_SNAKE_CASE_ : Tuple = np.asarray(__lowerCAmelCase , dtype=np.floataa )
elif isinstance(__lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE_ : Optional[int] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE_ : Optional[Any] = [np.asarray([raw_speech] ).T]
SCREAMING_SNAKE_CASE_ : List[str] = BatchFeature({'input_features': raw_speech} )
# convert into correct format for padding
SCREAMING_SNAKE_CASE_ : List[Any] = self.pad(
__lowerCAmelCase , padding=__lowerCAmelCase , max_length=max_length if max_length else self.n_samples , truncation=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
SCREAMING_SNAKE_CASE_ : Dict = self.zero_mean_unit_var_norm(
padded_inputs['input_features'] , attention_mask=padded_inputs['attention_mask'] , padding_value=self.padding_value , )
SCREAMING_SNAKE_CASE_ : Dict = np.stack(padded_inputs['input_features'] , axis=0 )
# make sure list is in array format
SCREAMING_SNAKE_CASE_ : Tuple = padded_inputs.get('input_features' ).transpose(2 , 0 , 1 )
SCREAMING_SNAKE_CASE_ : str = [self._np_extract_fbank_features(__lowerCAmelCase ) for waveform in input_features[0]]
if isinstance(input_features[0] , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Optional[int] = [np.asarray(__lowerCAmelCase , dtype=np.floataa ) for feature in input_features]
else:
SCREAMING_SNAKE_CASE_ : List[Any] = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
SCREAMING_SNAKE_CASE_ : Optional[int] = padded_inputs["attention_mask"][:, :: self.hop_length]
if return_tensors is not None:
SCREAMING_SNAKE_CASE_ : str = padded_inputs.convert_to_tensors(__lowerCAmelCase )
return padded_inputs
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Dict = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE_ : int = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
| 715 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase__: Optional[Any] = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__: str = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__: Any = [
"PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"PLBartForCausalLM",
"PLBartForConditionalGeneration",
"PLBartForSequenceClassification",
"PLBartModel",
"PLBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
lowerCAmelCase__: Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 311 | 0 |
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
__SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class lowerCamelCase_( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ = 1_0_1 ):
_lowerCamelCase = length
def __len__( self ):
return self.length
def __getitem__( self , lowerCamelCase__ ):
return i
class lowerCamelCase_:
'''simple docstring'''
def __call__( self , lowerCamelCase__ ):
return {"input_ids": torch.tensor(_lowerCAmelCase ), "labels": torch.tensor(_lowerCAmelCase )}
class lowerCamelCase_( nn.Module ):
'''simple docstring'''
def __init__( self ):
super().__init__()
# Add some (unused) params otherwise DDP will complain.
_lowerCamelCase = nn.Linear(1_2_0 , 8_0 )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__=None ):
if labels is not None:
return torch.tensor(0.0 , device=input_ids.device ), input_ids
else:
return input_ids
class lowerCamelCase_( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@require_torch_neuroncore
def snake_case__ ( self ):
_lowerCamelCase = F"""--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n """.split()
_lowerCamelCase = self.get_auto_remove_tmp_dir()
_lowerCamelCase = F"""--output_dir {output_dir}""".split()
_lowerCamelCase = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(_lowerCAmelCase , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
class lowerCamelCase_( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@require_torch_multi_gpu
def snake_case__ ( self ):
_lowerCamelCase = F"""--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n """.split()
_lowerCamelCase = self.get_auto_remove_tmp_dir()
_lowerCamelCase = F"""--output_dir {output_dir}""".split()
_lowerCamelCase = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(_lowerCAmelCase , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
__SCREAMING_SNAKE_CASE : int = HfArgumentParser((TrainingArguments,))
__SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args_into_dataclasses()[0]
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
F"""distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"""
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [1_0_1, 4_0, 7]:
__SCREAMING_SNAKE_CASE : str = DummyDataset(dataset_length)
def lowerCAmelCase_( lowercase_ : EvalPrediction ) -> Dict:
_lowerCamelCase = list(range(len(__UpperCAmelCase ) ) )
_lowerCamelCase = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
'''Predictions and/or labels do not match expected results:\n - predictions: '''
F"""{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}""" )
return {"success": success}
__SCREAMING_SNAKE_CASE : Tuple = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
__SCREAMING_SNAKE_CASE : List[Any] = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
__SCREAMING_SNAKE_CASE : Dict = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
__SCREAMING_SNAKE_CASE : List[str] = 2
__SCREAMING_SNAKE_CASE : Tuple = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
__SCREAMING_SNAKE_CASE : int = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
| 661 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
lowerCamelCase__ : List[Any] = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
lowerCamelCase__ : Union[str, Any] = TaTokenizerFast
lowerCamelCase__ : Dict = {'configuration_mt5': ['MT5Config', 'MT5OnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Tuple = [
'MT5EncoderModel',
'MT5ForConditionalGeneration',
'MT5ForQuestionAnswering',
'MT5Model',
'MT5PreTrainedModel',
'MT5Stack',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Tuple = ['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Tuple = ['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
lowerCamelCase__ : int = _LazyModule(
__name__,
globals()['__file__'],
_import_structure,
extra_objects={'MT5Tokenizer': MTaTokenizer, 'MT5TokenizerFast': MTaTokenizerFast},
module_spec=__spec__,
) | 31 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@property
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
torch.manual_seed(0 )
_a : List[Any] = UNetaDModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=('DownBlock2D', 'AttnDownBlock2D') ,up_block_types=('AttnUpBlock2D', 'UpBlock2D') ,)
return model
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
_a : List[Any] = self.dummy_uncond_unet
_a : List[str] = ScoreSdeVeScheduler()
_a : Any = ScoreSdeVePipeline(unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ )
sde_ve.to(lowerCamelCase_ )
sde_ve.set_progress_bar_config(disable=lowerCamelCase_ )
_a : Tuple = torch.manual_seed(0 )
_a : Tuple = sde_ve(num_inference_steps=2 ,output_type='numpy' ,generator=lowerCamelCase_ ).images
_a : str = torch.manual_seed(0 )
_a : int = sde_ve(num_inference_steps=2 ,output_type='numpy' ,generator=lowerCamelCase_ ,return_dict=lowerCamelCase_ )[
0
]
_a : Dict = image[0, -3:, -3:, -1]
_a : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_a : Any = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
_a : Any = """google/ncsnpp-church-256"""
_a : Union[str, Any] = UNetaDModel.from_pretrained(lowerCamelCase_ )
_a : Union[str, Any] = ScoreSdeVeScheduler.from_pretrained(lowerCamelCase_ )
_a : Union[str, Any] = ScoreSdeVePipeline(unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ )
sde_ve.to(lowerCamelCase_ )
sde_ve.set_progress_bar_config(disable=lowerCamelCase_ )
_a : Union[str, Any] = torch.manual_seed(0 )
_a : List[Any] = sde_ve(num_inference_steps=10 ,output_type='numpy' ,generator=lowerCamelCase_ ).images
_a : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_a : Union[str, Any] = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 713 |
'''simple docstring'''
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
_a : Optional[Any] = 1
_a : str = 1
while repunit:
_a : Union[str, Any] = (1_0 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
def UpperCAmelCase_ (__a : int = 1_0_0_0_0_0_0 ):
"""simple docstring"""
_a : int = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(__a ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(f'''{solution() = }''')
| 319 | 0 |
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
from .unet_ad import UNetaDModel
from .unet_ad import UNetaDModel
from .unet_ad_condition import UNetaDConditionModel
from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 315 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
_UpperCAmelCase : jnp.ndarray
_UpperCAmelCase : jnp.ndarray
class __SCREAMING_SNAKE_CASE ( nn.Module ):
_UpperCAmelCase : int
_UpperCAmelCase : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6)
_UpperCAmelCase : jnp.dtype = jnp.floataa
def __lowerCamelCase ( self : str ) ->Any:
lowerCamelCase__ : List[str] = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowerCamelCase__ : Any = []
for i in range(len(self.block_out_channels ) - 1 ):
lowerCamelCase__ : Dict = self.block_out_channels[i]
lowerCamelCase__ : Union[str, Any] = self.block_out_channels[i + 1]
lowerCamelCase__ : Dict = nn.Conv(
A , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(A )
lowerCamelCase__ : Union[str, Any] = nn.Conv(
A , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(A )
lowerCamelCase__ : int = blocks
lowerCamelCase__ : List[str] = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self : Any , A : Dict ) ->int:
lowerCamelCase__ : Tuple = self.conv_in(A )
lowerCamelCase__ : Dict = nn.silu(A )
for block in self.blocks:
lowerCamelCase__ : Optional[int] = block(A )
lowerCamelCase__ : Optional[int] = nn.silu(A )
lowerCamelCase__ : Union[str, Any] = self.conv_out(A )
return embedding
@flax_register_to_config
class __SCREAMING_SNAKE_CASE ( nn.Module ,lowerCAmelCase_ ,lowerCAmelCase_ ):
_UpperCAmelCase : int = 3_2
_UpperCAmelCase : int = 4
_UpperCAmelCase : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
_UpperCAmelCase : Union[bool, Tuple[bool]] = False
_UpperCAmelCase : Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
_UpperCAmelCase : int = 2
_UpperCAmelCase : Union[int, Tuple[int]] = 8
_UpperCAmelCase : Optional[Union[int, Tuple[int]]] = None
_UpperCAmelCase : int = 1_2_8_0
_UpperCAmelCase : float = 0.0
_UpperCAmelCase : bool = False
_UpperCAmelCase : jnp.dtype = jnp.floataa
_UpperCAmelCase : bool = True
_UpperCAmelCase : int = 0
_UpperCAmelCase : str = "rgb"
_UpperCAmelCase : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6)
def __lowerCamelCase ( self : int , A : jax.random.KeyArray ) ->FrozenDict:
# init input tensors
lowerCamelCase__ : Optional[Any] = (1, self.in_channels, self.sample_size, self.sample_size)
lowerCamelCase__ : List[Any] = jnp.zeros(A , dtype=jnp.floataa )
lowerCamelCase__ : int = jnp.ones((1,) , dtype=jnp.intaa )
lowerCamelCase__ : List[str] = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
lowerCamelCase__ : int = (1, 3, self.sample_size * 8, self.sample_size * 8)
lowerCamelCase__ : Union[str, Any] = jnp.zeros(A , dtype=jnp.floataa )
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = jax.random.split(A )
lowerCamelCase__ : Any = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(A , A , A , A , A )["params"]
def __lowerCamelCase ( self : List[Any] ) ->Dict:
lowerCamelCase__ : int = self.block_out_channels
lowerCamelCase__ : Tuple = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
lowerCamelCase__ : Optional[Any] = self.num_attention_heads or self.attention_head_dim
# input
lowerCamelCase__ : List[Any] = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
lowerCamelCase__ : Optional[Any] = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
lowerCamelCase__ : int = FlaxTimestepEmbedding(A , dtype=self.dtype )
lowerCamelCase__ : int = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
lowerCamelCase__ : Optional[int] = self.only_cross_attention
if isinstance(A , A ):
lowerCamelCase__ : List[Any] = (only_cross_attention,) * len(self.down_block_types )
if isinstance(A , A ):
lowerCamelCase__ : Tuple = (num_attention_heads,) * len(self.down_block_types )
# down
lowerCamelCase__ : Union[str, Any] = []
lowerCamelCase__ : Optional[Any] = []
lowerCamelCase__ : Optional[int] = block_out_channels[0]
lowerCamelCase__ : Tuple = nn.Conv(
A , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(A )
for i, down_block_type in enumerate(self.down_block_types ):
lowerCamelCase__ : Any = output_channel
lowerCamelCase__ : int = block_out_channels[i]
lowerCamelCase__ : Dict = i == len(A ) - 1
if down_block_type == "CrossAttnDownBlock2D":
lowerCamelCase__ : Tuple = FlaxCrossAttnDownBlockaD(
in_channels=A , out_channels=A , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
lowerCamelCase__ : Optional[Any] = FlaxDownBlockaD(
in_channels=A , out_channels=A , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(A )
for _ in range(self.layers_per_block ):
lowerCamelCase__ : str = nn.Conv(
A , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(A )
if not is_final_block:
lowerCamelCase__ : int = nn.Conv(
A , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(A )
lowerCamelCase__ : Union[str, Any] = down_blocks
lowerCamelCase__ : Tuple = controlnet_down_blocks
# mid
lowerCamelCase__ : Dict = block_out_channels[-1]
lowerCamelCase__ : Any = FlaxUNetMidBlockaDCrossAttn(
in_channels=A , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
lowerCamelCase__ : int = nn.Conv(
A , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self : Any , A : Dict , A : Tuple , A : List[str] , A : int , A : float = 1.0 , A : bool = True , A : bool = False , ) ->Union[FlaxControlNetOutput, Tuple]:
lowerCamelCase__ : List[Any] = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
lowerCamelCase__ : Union[str, Any] = jnp.flip(A , axis=1 )
# 1. time
if not isinstance(A , jnp.ndarray ):
lowerCamelCase__ : Tuple = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(A , jnp.ndarray ) and len(timesteps.shape ) == 0:
lowerCamelCase__ : str = timesteps.astype(dtype=jnp.floataa )
lowerCamelCase__ : Optional[Any] = jnp.expand_dims(A , 0 )
lowerCamelCase__ : Dict = self.time_proj(A )
lowerCamelCase__ : List[Any] = self.time_embedding(A )
# 2. pre-process
lowerCamelCase__ : int = jnp.transpose(A , (0, 2, 3, 1) )
lowerCamelCase__ : List[Any] = self.conv_in(A )
lowerCamelCase__ : Dict = jnp.transpose(A , (0, 2, 3, 1) )
lowerCamelCase__ : List[str] = self.controlnet_cond_embedding(A )
sample += controlnet_cond
# 3. down
lowerCamelCase__ : List[Any] = (sample,)
for down_block in self.down_blocks:
if isinstance(A , A ):
lowerCamelCase__ , lowerCamelCase__ : List[str] = down_block(A , A , A , deterministic=not train )
else:
lowerCamelCase__ , lowerCamelCase__ : str = down_block(A , A , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
lowerCamelCase__ : Any = self.mid_block(A , A , A , deterministic=not train )
# 5. contronet blocks
lowerCamelCase__ : Optional[Any] = ()
for down_block_res_sample, controlnet_block in zip(A , self.controlnet_down_blocks ):
lowerCamelCase__ : str = controlnet_block(A )
controlnet_down_block_res_samples += (down_block_res_sample,)
lowerCamelCase__ : str = controlnet_down_block_res_samples
lowerCamelCase__ : Optional[int] = self.controlnet_mid_block(A )
# 6. scaling
lowerCamelCase__ : Optional[Any] = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=A , mid_block_res_sample=A )
| 315 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_UpperCamelCase : Dict ={
"configuration_speecht5": [
"SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
"SpeechT5Config",
"SpeechT5HifiGanConfig",
],
"feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
"processing_speecht5": ["SpeechT5Processor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : str =["SpeechT5Tokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : str =[
"SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
"SpeechT5ForSpeechToText",
"SpeechT5ForSpeechToSpeech",
"SpeechT5ForTextToSpeech",
"SpeechT5Model",
"SpeechT5PreTrainedModel",
"SpeechT5HifiGan",
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
_UpperCamelCase : List[Any] =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 721 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_UpperCamelCase : Tuple ={
"configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Optional[Any] =[
"GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"GraphormerForGraphClassification",
"GraphormerModel",
"GraphormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
_UpperCamelCase : int =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 575 | 0 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class _snake_case ( unittest.TestCase ):
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Dict = """ylacombe/bark-small"""
lowercase__ : Dict = tempfile.mkdtemp()
lowercase__ : Any = """en_speaker_1"""
lowercase__ : Optional[int] = """This is a test string"""
lowercase__ : Tuple = """speaker_embeddings_path.json"""
lowercase__ : str = """speaker_embeddings"""
def lowercase__ ( self , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.checkpoint , **SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Dict = self.get_tokenizer()
lowercase__ : int = BarkProcessor(tokenizer=SCREAMING_SNAKE_CASE_)
processor.save_pretrained(self.tmpdirname)
lowercase__ : List[str] = BarkProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab())
@slow
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Union[str, Any] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
lowercase__ : Optional[int] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""")
lowercase__ : Any = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Dict = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
lowercase__ : Optional[int] = 35
lowercase__ : Tuple = 2
lowercase__ : Dict = 8
lowercase__ : Optional[int] = {
"""semantic_prompt""": np.ones(SCREAMING_SNAKE_CASE_),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len)),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len)),
}
# test providing already loaded voice_preset
lowercase__ : Tuple = processor(text=self.input_string , voice_preset=SCREAMING_SNAKE_CASE_)
lowercase__ : Union[str, Any] = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(SCREAMING_SNAKE_CASE_ , np.array([])).tolist())
# test loading voice preset from npz file
lowercase__ : List[Any] = os.path.join(self.tmpdirname , """file.npz""")
np.savez(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
lowercase__ : str = processor(text=self.input_string , voice_preset=SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[Any] = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(SCREAMING_SNAKE_CASE_ , np.array([])).tolist())
# test loading voice preset from the hub
lowercase__ : int = processor(text=self.input_string , voice_preset=self.voice_preset)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Dict = self.get_tokenizer()
lowercase__ : str = BarkProcessor(tokenizer=SCREAMING_SNAKE_CASE_)
lowercase__ : List[Any] = processor(text=self.input_string)
lowercase__ : List[str] = tokenizer(
self.input_string , padding="""max_length""" , max_length=2_56 , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist())
| 12 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
lowerCamelCase__ = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowerCamelCase__ = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowerCamelCase__ = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowerCamelCase__ = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
lowerCamelCase__ = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
lowerCamelCase__ = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
lowerCamelCase__ = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
lowerCamelCase__ = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
lowerCamelCase__ = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class _UpperCamelCase ( __snake_case):
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
__lowerCamelCase = DPRContextEncoderTokenizer
class _UpperCamelCase ( __snake_case):
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
__lowerCamelCase = DPRQuestionEncoderTokenizer
lowerCamelCase__ = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
lowerCamelCase__ = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
lowerCamelCase__ = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(__snake_case)
class _UpperCamelCase :
def __call__(self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = False , lowerCamelCase__ = False , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , **lowerCamelCase__ , ):
"""simple docstring"""
if titles is None and texts is None:
return super().__call__(
lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , return_tensors=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , **lowerCamelCase__ , )
elif titles is None or texts is None:
A__ = titles if texts is None else texts
return super().__call__(
lowerCamelCase__ , lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , return_tensors=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , **lowerCamelCase__ , )
A__ = titles if not isinstance(lowerCamelCase__ , lowerCamelCase__ ) else [titles]
A__ = texts if not isinstance(lowerCamelCase__ , lowerCamelCase__ ) else [texts]
A__ = len(lowerCamelCase__ )
A__ = questions if not isinstance(lowerCamelCase__ , lowerCamelCase__ ) else [questions] * n_passages
assert len(lowerCamelCase__ ) == len(
lowerCamelCase__ ), F"""There should be as many titles than texts but got {len(lowerCamelCase__ )} titles and {len(lowerCamelCase__ )} texts."""
A__ = super().__call__(lowerCamelCase__ , lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ )["""input_ids"""]
A__ = super().__call__(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ )["""input_ids"""]
A__ = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(lowerCamelCase__ , lowerCamelCase__ )
]
}
if return_attention_mask is not False:
A__ = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
A__ = attention_mask
return self.pad(lowerCamelCase__ , padding=lowerCamelCase__ , max_length=lowerCamelCase__ , return_tensors=lowerCamelCase__ )
def A (self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 1_6 , lowerCamelCase__ = 6_4 , lowerCamelCase__ = 4 , ):
"""simple docstring"""
A__ = reader_input["""input_ids"""]
A__ ,A__ ,A__ = reader_output[:3]
A__ = len(lowerCamelCase__ )
A__ = sorted(range(lowerCamelCase__ ) , reverse=lowerCamelCase__ , key=relevance_logits.__getitem__ )
A__ = []
for doc_id in sorted_docs:
A__ = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
A__ = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
A__ = sequence_ids.index(self.pad_token_id )
else:
A__ = len(lowerCamelCase__ )
A__ = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=lowerCamelCase__ , top_spans=lowerCamelCase__ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=lowerCamelCase__ , start_index=lowerCamelCase__ , end_index=lowerCamelCase__ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(lowerCamelCase__ ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def A (self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ):
"""simple docstring"""
A__ = []
for start_index, start_score in enumerate(lowerCamelCase__ ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
A__ = sorted(lowerCamelCase__ , key=lambda lowerCamelCase__ : x[1] , reverse=lowerCamelCase__ )
A__ = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, F"""Wrong span indices: [{start_index}:{end_index}]"""
A__ = end_index - start_index + 1
assert length <= max_answer_length, F"""Span is too long: {length} > {max_answer_length}"""
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(lowerCamelCase__ ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(__snake_case)
class _UpperCamelCase ( __snake_case , __snake_case):
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = READER_PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = READER_PRETRAINED_INIT_CONFIGURATION
__lowerCamelCase = ["input_ids", "attention_mask"]
__lowerCamelCase = DPRReaderTokenizer
| 574 | 0 |
'''simple docstring'''
from maths.prime_factors import prime_factors
def UpperCamelCase( UpperCAmelCase_ ):
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Tuple = F"""Input value of [number={number}] must be an integer"""
raise TypeError(UpperCAmelCase_ )
if number < 1:
raise ValueError('Input must be a positive integer' )
return -1 if len(prime_factors(UpperCAmelCase_ ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 704 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = """openai/whisper-base"""
UpperCAmelCase_ : Union[str, Any] = (
"""This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
"""transcribed text."""
)
UpperCAmelCase_ : Dict = """transcriber"""
UpperCAmelCase_ : int = WhisperProcessor
UpperCAmelCase_ : Optional[int] = WhisperForConditionalGeneration
UpperCAmelCase_ : Dict = ["""audio"""]
UpperCAmelCase_ : Optional[int] = ["""text"""]
def UpperCAmelCase_ ( self : Tuple , lowercase_ : str ) -> Optional[int]:
return self.pre_processor(lowercase_ , return_tensors='pt' ).input_features
def UpperCAmelCase_ ( self : Tuple , lowercase_ : int ) -> List[str]:
return self.model.generate(inputs=lowercase_ )
def UpperCAmelCase_ ( self : str , lowercase_ : List[Any] ) -> List[str]:
return self.pre_processor.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )[0]
| 695 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase_ = {
"configuration_groupvit": [
"GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"GroupViTConfig",
"GroupViTOnnxConfig",
"GroupViTTextConfig",
"GroupViTVisionConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GroupViTModel",
"GroupViTPreTrainedModel",
"GroupViTTextModel",
"GroupViTVisionModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFGroupViTModel",
"TFGroupViTPreTrainedModel",
"TFGroupViTTextModel",
"TFGroupViTVisionModel",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 28 | """simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ,_lowerCamelCase : Union[str, Any] ,_lowerCamelCase : int=0 ) -> List[str]:
# Format the message.
if name is None:
_lowerCAmelCase : Optional[Any] = None
else:
_lowerCAmelCase : int = """.""" * max(0 ,spaces - 2 ) + """# {:""" + str(50 - spaces ) + """s}"""
_lowerCAmelCase : int = fmt.format(_lowerCamelCase )
# Print and recurse (if needed).
if isinstance(_lowerCamelCase ,_lowerCamelCase ):
if msg is not None:
print(_lowerCamelCase )
for k in val.keys():
recursive_print(_lowerCamelCase ,val[k] ,spaces + 2 )
elif isinstance(_lowerCamelCase ,torch.Tensor ):
print(_lowerCamelCase ,""":""" ,val.size() )
else:
print(_lowerCamelCase ,""":""" ,_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ,_lowerCamelCase : Optional[int] ,_lowerCamelCase : Any ,_lowerCamelCase : Tuple ,_lowerCamelCase : Optional[Any] ) -> int:
# Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
# for compatibility with later versions of NVIDIA Megatron-LM.
# The inverse operation is performed inside Megatron-LM to read checkpoints:
# https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
# If param is the weight tensor of the self-attention block, the returned tensor
# will have to be transposed one more time to be read by HuggingFace GPT2.
_lowerCAmelCase : str = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
_lowerCAmelCase : int = (num_heads, hidden_size, num_splits) + input_shape[1:]
_lowerCAmelCase : Tuple = param.view(*_lowerCamelCase )
_lowerCAmelCase : str = param.transpose(0 ,2 )
_lowerCAmelCase : str = param.transpose(1 ,2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
_lowerCAmelCase : List[str] = (num_heads, num_splits, hidden_size) + input_shape[1:]
_lowerCAmelCase : str = param.view(*_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = param.transpose(0 ,1 ).contiguous()
_lowerCAmelCase : Optional[Any] = param.view(*_lowerCamelCase )
return param
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ,_lowerCamelCase : Union[str, Any] ,_lowerCamelCase : str ) -> Any:
# The converted output model.
_lowerCAmelCase : Optional[int] = {}
# old versions did not store training args
_lowerCAmelCase : Dict = input_state_dict.get("""args""" ,_lowerCamelCase )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
_lowerCAmelCase : Optional[Any] = ds_args.padded_vocab_size
_lowerCAmelCase : Tuple = ds_args.max_position_embeddings
_lowerCAmelCase : Optional[Any] = ds_args.hidden_size
_lowerCAmelCase : Union[str, Any] = ds_args.num_layers
_lowerCAmelCase : Dict = ds_args.num_attention_heads
_lowerCAmelCase : Optional[Any] = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
_lowerCAmelCase : List[str] = config.n_head
# The hidden_size per head.
_lowerCAmelCase : Any = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
_lowerCAmelCase : Tuple = input_state_dict["""checkpoint_version"""]
else:
_lowerCAmelCase : Union[str, Any] = 0.0
# The model.
_lowerCAmelCase : Any = input_state_dict["""model"""]
# The language model.
_lowerCAmelCase : Any = model["""language_model"""]
# The embeddings.
_lowerCAmelCase : Union[str, Any] = lm["""embedding"""]
# The word embeddings.
_lowerCAmelCase : int = embeddings["""word_embeddings"""]["""weight"""]
# Truncate the embedding table to vocab_size rows.
_lowerCAmelCase : Dict = word_embeddings[: config.vocab_size, :]
_lowerCAmelCase : Optional[int] = word_embeddings
# The position embeddings.
_lowerCAmelCase : Tuple = embeddings["""position_embeddings"""]["""weight"""]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
_lowerCAmelCase : Union[str, Any] = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match" )
# Store the position embeddings.
_lowerCAmelCase : Optional[Any] = pos_embeddings
# The transformer.
_lowerCAmelCase : Optional[Any] = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]
# The regex to extract layer names.
_lowerCAmelCase : Any = re.compile(r"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" )
# The simple map of names for "automated" rules.
_lowerCAmelCase : Optional[Any] = {
"""attention.dense""": """.attn.c_proj.""",
"""self_attention.dense""": """.attn.c_proj.""",
"""mlp.dense_h_to_4h""": """.mlp.c_fc.""",
"""mlp.dense_4h_to_h""": """.mlp.c_proj.""",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
_lowerCAmelCase : Tuple = layer_re.match(_lowerCamelCase )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
_lowerCAmelCase : Optional[int] = int(m.group(1 ) )
# The name of the operation.
_lowerCAmelCase : Tuple = m.group(2 )
# Is it a weight or a bias?
_lowerCAmelCase : List[Any] = m.group(3 )
# The name of the layer.
_lowerCAmelCase : str = f"transformer.h.{layer_idx}"
# For layernorm(s), simply store the layer norm.
if op_name.endswith("""layernorm""" ):
_lowerCAmelCase : Optional[Any] = """ln_1""" if op_name.startswith("""input""" ) else """ln_2"""
_lowerCAmelCase : List[Any] = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
_lowerCAmelCase : Optional[int] = torch.tril(torch.ones((n_positions, n_positions) ,dtype=torch.floataa ) ).view(
1 ,1 ,_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = causal_mask
# Insert a "dummy" tensor for masked_bias.
_lowerCAmelCase : Dict = torch.tensor(-1e4 ,dtype=torch.floataa )
_lowerCAmelCase : Dict = masked_bias
_lowerCAmelCase : List[Any] = fix_query_key_value_ordering(_lowerCamelCase ,_lowerCamelCase ,3 ,_lowerCamelCase ,_lowerCamelCase )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
_lowerCAmelCase : int = out_val.transpose(0 ,1 ).contiguous()
# Store.
_lowerCAmelCase : List[str] = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
_lowerCAmelCase : Union[str, Any] = fix_query_key_value_ordering(_lowerCamelCase ,_lowerCamelCase ,3 ,_lowerCamelCase ,_lowerCamelCase )
# Store. No change of shape.
_lowerCAmelCase : str = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
_lowerCAmelCase : Any = megatron_to_transformers[op_name]
_lowerCAmelCase : Optional[Any] = val.transpose(0 ,1 )
# Copy the bias.
elif weight_or_bias == "bias":
_lowerCAmelCase : str = megatron_to_transformers[op_name]
_lowerCAmelCase : Union[str, Any] = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
_lowerCAmelCase : int = transformer["""final_layernorm.weight"""]
_lowerCAmelCase : Union[str, Any] = transformer["""final_layernorm.bias"""]
# For LM head, transformers' wants the matrix to weight embeddings.
_lowerCAmelCase : int = word_embeddings
# It should be done!
return output_state_dict
def SCREAMING_SNAKE_CASE ( ) -> List[str]:
# Create the argument parser.
_lowerCAmelCase : int = argparse.ArgumentParser()
parser.add_argument("""--print-checkpoint-structure""" ,action="""store_true""" )
parser.add_argument(
"""path_to_checkpoint""" ,type=_lowerCamelCase ,help="""Path to the checkpoint file (.zip archive or direct .pt file)""" ,)
parser.add_argument(
"""--config_file""" ,default="""""" ,type=_lowerCamelCase ,help="""An optional config json file describing the pre-trained model.""" ,)
_lowerCAmelCase : List[Any] = parser.parse_args()
# Extract the basename.
_lowerCAmelCase : Optional[int] = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}" )
if args.path_to_checkpoint.endswith(""".zip""" ):
with zipfile.ZipFile(args.path_to_checkpoint ,"""r""" ) as checkpoint:
with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict:
_lowerCAmelCase : Any = torch.load(_lowerCamelCase ,map_location="""cpu""" )
else:
_lowerCAmelCase : Optional[int] = torch.load(args.path_to_checkpoint ,map_location="""cpu""" )
_lowerCAmelCase : Optional[int] = input_state_dict.get("""args""" ,_lowerCamelCase )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
_lowerCAmelCase : Optional[Any] = """gelu_fast"""
elif ds_args.openai_gelu:
_lowerCAmelCase : Any = """gelu_new"""
else:
_lowerCAmelCase : str = """gelu"""
else:
# in the very early days this used to be "gelu_new"
_lowerCAmelCase : Any = """gelu_new"""
# Spell out all parameters in case the defaults change.
_lowerCAmelCase : Tuple = GPTaConfig(
vocab_size=50257 ,n_positions=1024 ,n_embd=1024 ,n_layer=24 ,n_head=16 ,n_inner=4096 ,activation_function=_lowerCamelCase ,resid_pdrop=0.1 ,embd_pdrop=0.1 ,attn_pdrop=0.1 ,layer_norm_epsilon=1e-5 ,initializer_range=0.02 ,summary_type="""cls_index""" ,summary_use_proj=_lowerCamelCase ,summary_activation=_lowerCamelCase ,summary_proj_to_labels=_lowerCamelCase ,summary_first_dropout=0.1 ,scale_attn_weights=_lowerCamelCase ,use_cache=_lowerCamelCase ,bos_token_id=50256 ,eos_token_id=50256 ,)
else:
_lowerCAmelCase : Optional[Any] = GPTaConfig.from_json_file(args.config_file )
_lowerCAmelCase : Tuple = ["""GPT2LMHeadModel"""]
# Convert.
print("""Converting""" )
_lowerCAmelCase : Tuple = convert_megatron_checkpoint(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(_lowerCamelCase ,_lowerCamelCase )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
_lowerCAmelCase : Optional[Any] = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
_lowerCAmelCase : Dict = """gpt2"""
elif tokenizer_type == "PretrainedFromHF":
_lowerCAmelCase : List[str] = ds_args.tokenizer_name_or_path
else:
raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}" )
else:
_lowerCAmelCase : Optional[Any] = """gpt2"""
_lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = type(_lowerCamelCase ).__name__
_lowerCAmelCase : Dict = tokenizer_class
# Store the config to file.
print("""Saving config""" )
config.save_pretrained(_lowerCamelCase )
# Save tokenizer based on args
print(f"Adding {tokenizer_class} tokenizer files" )
tokenizer.save_pretrained(_lowerCamelCase )
# Store the state_dict to file.
_lowerCAmelCase : List[str] = os.path.join(_lowerCamelCase ,"""pytorch_model.bin""" )
print(f"Saving checkpoint to \"{output_checkpoint_file}\"" )
torch.save(_lowerCamelCase ,_lowerCamelCase )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 213 | 0 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCAmelCase__ ( unittest.TestCase ):
@property
def snake_case_ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase_: Dict = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
@property
def snake_case_ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase_: int = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , )
return model
@property
def snake_case_ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase_: Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(UpperCamelCase_ )
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: Union[str, Any] = self.dummy_uncond_unet
UpperCAmelCase_: str = DDIMScheduler()
UpperCAmelCase_: Optional[Any] = self.dummy_vq_model
UpperCAmelCase_: Union[str, Any] = LDMPipeline(unet=UpperCamelCase_ , vqvae=UpperCamelCase_ , scheduler=UpperCamelCase_ )
ldm.to(UpperCamelCase_ )
ldm.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCAmelCase_: Optional[Any] = torch.manual_seed(0 )
UpperCAmelCase_: Dict = ldm(generator=UpperCamelCase_ , num_inference_steps=2 , output_type="numpy" ).images
UpperCAmelCase_: List[str] = torch.manual_seed(0 )
UpperCAmelCase_: Any = ldm(generator=UpperCamelCase_ , num_inference_steps=2 , output_type="numpy" , return_dict=UpperCamelCase_ )[0]
UpperCAmelCase_: int = image[0, -3:, -3:, -1]
UpperCAmelCase_: Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_: Optional[int] = np.array([0.8_512, 0.818, 0.6_411, 0.6_808, 0.4_465, 0.5_618, 0.46, 0.6_231, 0.5_172] )
UpperCAmelCase_: str = 1E-2 if torch_device != "mps" else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: Dict = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256" )
ldm.to(UpperCamelCase_ )
ldm.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCAmelCase_: List[Any] = torch.manual_seed(0 )
UpperCAmelCase_: Dict = ldm(generator=UpperCamelCase_ , num_inference_steps=5 , output_type="numpy" ).images
UpperCAmelCase_: Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCAmelCase_: List[str] = np.array([0.4_399, 0.44_975, 0.46_825, 0.474, 0.4_359, 0.4_581, 0.45_095, 0.4_341, 0.4_447] )
UpperCAmelCase_: List[Any] = 1E-2 if torch_device != "mps" else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance | 710 |
_lowerCAmelCase = frozenset(
[
"""prompt""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
]
)
_lowerCAmelCase = frozenset(["""prompt""", """negative_prompt"""])
_lowerCAmelCase = frozenset([])
_lowerCAmelCase = frozenset(["""image"""])
_lowerCAmelCase = frozenset(
[
"""image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
_lowerCAmelCase = frozenset(["""image"""])
_lowerCAmelCase = frozenset(
[
"""prompt""",
"""image""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
]
)
_lowerCAmelCase = frozenset(["""prompt""", """image""", """negative_prompt"""])
_lowerCAmelCase = frozenset(
[
# Text guided image variation with an image mask
"""prompt""",
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
]
)
_lowerCAmelCase = frozenset(["""prompt""", """image""", """mask_image""", """negative_prompt"""])
_lowerCAmelCase = frozenset(
[
# image variation with an image mask
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
_lowerCAmelCase = frozenset(["""image""", """mask_image"""])
_lowerCAmelCase = frozenset(
[
"""example_image""",
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
_lowerCAmelCase = frozenset(["""example_image""", """image""", """mask_image"""])
_lowerCAmelCase = frozenset(["""class_labels"""])
_lowerCAmelCase = frozenset(["""class_labels"""])
_lowerCAmelCase = frozenset(["""batch_size"""])
_lowerCAmelCase = frozenset([])
_lowerCAmelCase = frozenset(["""batch_size"""])
_lowerCAmelCase = frozenset([])
_lowerCAmelCase = frozenset(
[
"""prompt""",
"""audio_length_in_s""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
]
)
_lowerCAmelCase = frozenset(["""prompt""", """negative_prompt"""])
_lowerCAmelCase = frozenset(["""input_tokens"""])
_lowerCAmelCase = frozenset(["""input_tokens"""]) | 306 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : Tuple =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : List[Any] ={
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/config.json''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/config.json''',
'''funnel-transformer/medium-base''': '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json''',
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/config.json''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json''',
'''funnel-transformer/xlarge-base''': '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json''',
}
class A_ ( __a ):
_A :Optional[Any] = '''funnel'''
_A :Dict = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
}
def __init__( self : List[str] , snake_case__ : Dict=3_05_22 , snake_case__ : Dict=[4, 4, 4] , snake_case__ : Dict=None , snake_case__ : List[Any]=2 , snake_case__ : Any=7_68 , snake_case__ : List[Any]=12 , snake_case__ : List[str]=64 , snake_case__ : Union[str, Any]=30_72 , snake_case__ : Dict="gelu_new" , snake_case__ : str=0.1 , snake_case__ : Tuple=0.1 , snake_case__ : List[str]=0.0 , snake_case__ : int=0.1 , snake_case__ : Any=None , snake_case__ : Union[str, Any]=1E-9 , snake_case__ : List[str]="mean" , snake_case__ : Tuple="relative_shift" , snake_case__ : Any=True , snake_case__ : Any=True , snake_case__ : Optional[Any]=True , **snake_case__ : Any , ):
lowercase = vocab_size
lowercase = block_sizes
lowercase = [1] * len(snake_case__ ) if block_repeats is None else block_repeats
assert len(snake_case__ ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
lowercase = num_decoder_layers
lowercase = d_model
lowercase = n_head
lowercase = d_head
lowercase = d_inner
lowercase = hidden_act
lowercase = hidden_dropout
lowercase = attention_dropout
lowercase = activation_dropout
lowercase = initializer_range
lowercase = initializer_std
lowercase = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], F"""Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."""
lowercase = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], F"""Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."""
lowercase = attention_type
lowercase = separate_cls
lowercase = truncate_seq
lowercase = pool_q_only
super().__init__(**snake_case__ )
@property
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
return sum(self.block_sizes )
@num_hidden_layers.setter
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case__ : Any ):
raise NotImplementedError(
"""This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.""" )
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
return len(self.block_sizes )
@num_blocks.setter
def SCREAMING_SNAKE_CASE__ ( self : Any , snake_case__ : str ):
raise NotImplementedError("""This model does not support the setting of `num_blocks`. Please set `block_sizes`.""" )
| 428 |
import copy
import random
from transformers import CLIPTokenizer
class A_ ( __a ):
def __init__( self : Tuple , *snake_case__ : Any , **snake_case__ : Tuple ):
super().__init__(*snake_case__ , **snake_case__ )
lowercase = {}
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , snake_case__ : Any , *snake_case__ : Tuple , **snake_case__ : str ):
lowercase = super().add_tokens(snake_case__ , *snake_case__ , **snake_case__ )
if num_added_tokens == 0:
raise ValueError(
F"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
""" `placeholder_token` that is not already in the tokenizer.""" )
def SCREAMING_SNAKE_CASE__ ( self : str , snake_case__ : Any , *snake_case__ : int , snake_case__ : List[str]=1 , **snake_case__ : str ):
lowercase = []
if num_vec_per_token == 1:
self.try_adding_tokens(snake_case__ , *snake_case__ , **snake_case__ )
output.append(snake_case__ )
else:
lowercase = []
for i in range(snake_case__ ):
lowercase = placeholder_token + F"""_{i}"""
self.try_adding_tokens(snake_case__ , *snake_case__ , **snake_case__ )
output.append(snake_case__ )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
F"""The tokenizer already has placeholder token {token} that can get confused with"""
F""" {placeholder_token}keep placeholder tokens independent""" )
lowercase = output
def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case__ : int , snake_case__ : int=False , snake_case__ : str=1.0 ):
if isinstance(snake_case__ , snake_case__ ):
lowercase = []
for i in range(len(snake_case__ ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=snake_case__ ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
lowercase = self.token_map[placeholder_token]
lowercase = tokens[: 1 + int(len(snake_case__ ) * prop_tokens_to_load )]
if vector_shuffle:
lowercase = copy.copy(snake_case__ )
random.shuffle(snake_case__ )
lowercase = text.replace(snake_case__ , """ """.join(snake_case__ ) )
return text
def __call__( self : Optional[Any] , snake_case__ : str , *snake_case__ : Any , snake_case__ : Dict=False , snake_case__ : Any=1.0 , **snake_case__ : Any ):
return super().__call__(
self.replace_placeholder_tokens_in_text(
snake_case__ , vector_shuffle=snake_case__ , prop_tokens_to_load=snake_case__ ) , *snake_case__ , **snake_case__ , )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , snake_case__ : List[Any] , *snake_case__ : Tuple , snake_case__ : Dict=False , snake_case__ : Optional[Any]=1.0 , **snake_case__ : Tuple ):
return super().encode(
self.replace_placeholder_tokens_in_text(
snake_case__ , vector_shuffle=snake_case__ , prop_tokens_to_load=snake_case__ ) , *snake_case__ , **snake_case__ , )
| 428 | 1 |
"""simple docstring"""
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any]=None ) -> Any:
if subparsers is not None:
_lowerCAmelCase : Optional[Any] = subparsers.add_parser("""test""" )
else:
_lowerCAmelCase : List[Any] = argparse.ArgumentParser("""Accelerate test command""" )
parser.add_argument(
"""--config_file""" ,default=a_ ,help=(
"""The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
"""location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have """
"""such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed """
"""with \'huggingface\'."""
) ,)
if subparsers is not None:
parser.set_defaults(func=a_ )
return parser
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[int] ) -> Optional[Any]:
_lowerCAmelCase : List[str] = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["""test_utils""", """scripts""", """test_script.py"""] )
if args.config_file is None:
_lowerCAmelCase : List[Any] = script_name
else:
_lowerCAmelCase : Dict = f"--config_file={args.config_file} {script_name}"
_lowerCAmelCase : Optional[Any] = ['''accelerate-launch'''] + test_args.split()
_lowerCAmelCase : List[Any] = execute_subprocess_async(a_ ,env=os.environ.copy() )
if result.returncode == 0:
print("""Test is a success! You are ready for your distributed training!""" )
def SCREAMING_SNAKE_CASE ( ) -> Dict:
_lowerCAmelCase : Optional[Any] = test_command_parser()
_lowerCAmelCase : Dict = parser.parse_args()
test_command(a_ )
if __name__ == "__main__":
main()
| 718 | """simple docstring"""
import argparse
import json
import subprocess
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : Any ) -> List[Any]:
_lowerCAmelCase : Tuple = []
_lowerCAmelCase : Optional[int] = (
f"curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""
""" https://api.github.com/repos/huggingface/transformers/actions/runners"""
)
_lowerCAmelCase : List[str] = subprocess.run(_lowerCamelCase ,shell=_lowerCamelCase ,stdout=subprocess.PIPE )
_lowerCAmelCase : int = output.stdout.decode("""utf-8""" )
_lowerCAmelCase : Tuple = json.loads(_lowerCamelCase )
_lowerCAmelCase : int = status["""runners"""]
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(_lowerCamelCase )
# save the result so we can report them on Slack
with open("""offline_runners.txt""" ,"""w""" ) as fp:
fp.write(json.dumps(_lowerCamelCase ) )
if len(_lowerCamelCase ) > 0:
_lowerCAmelCase : int = """\n""".join([x["""name"""] for x in offline_runners] )
raise ValueError(f"The following runners are offline:\n{failed}" )
if __name__ == "__main__":
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ) -> Optional[int]:
return values.split(""",""" )
_a : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--target_runners',
default=None,
type=list_str,
required=True,
help='Comma-separated list of runners to check status.',
)
parser.add_argument(
'--token', default=None, type=str, required=True, help='A token that has actions:read permission.'
)
_a : Tuple = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 663 | 0 |
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: list[int] ):
SCREAMING_SNAKE_CASE__ = len(UpperCamelCase__ )
for i in range(UpperCamelCase__ ):
for j in range(i + 1 , UpperCamelCase__ ):
if numbers[j] < numbers[i]:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
_lowerCamelCase = input('Enter numbers separated by a comma:\n').strip()
_lowerCamelCase = [int(item) for item in user_input.split(',')]
print(exchange_sort(unsorted)) | 6 |
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
_lowerCamelCase = logging.getLogger(__name__)
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser(
description='Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'
)
parser.add_argument(
'--data_file', type=str, default='data/dump.bert-base-uncased.pickle', help='The binarized dataset.'
)
parser.add_argument(
'--token_counts_dump', type=str, default='data/token_counts.bert-base-uncased.pickle', help='The dump file.'
)
parser.add_argument('--vocab_size', default=30522, type=int)
_lowerCamelCase = parser.parse_args()
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file, 'rb') as fp:
_lowerCamelCase = pickle.load(fp)
logger.info('Counting occurrences for MLM.')
_lowerCamelCase = Counter()
for tk_ids in data:
counter.update(tk_ids)
_lowerCamelCase = [0] * args.vocab_size
for k, v in counter.items():
_lowerCamelCase = v
logger.info(F'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, 'wb') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL) | 6 | 1 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
__a : int
__a : Node | None = None
__a : Node | None = None
def __magic_name__ ( ) -> Node | None:
"""simple docstring"""
lowercase_ : int = Node(1 )
lowercase_ : Optional[int] = Node(2 )
lowercase_ : Any = Node(3 )
lowercase_ : Tuple = Node(4 )
lowercase_ : Any = Node(5 )
return tree
def __magic_name__ ( lowercase ) -> list[int]:
"""simple docstring"""
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def __magic_name__ ( lowercase ) -> list[int]:
"""simple docstring"""
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def __magic_name__ ( lowercase ) -> list[int]:
"""simple docstring"""
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def __magic_name__ ( lowercase ) -> int:
"""simple docstring"""
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def __magic_name__ ( lowercase ) -> Sequence[Node | None]:
"""simple docstring"""
lowercase_ : list[Any] = []
if root is None:
return output
lowercase_ : Optional[Any] = deque([root] )
while process_queue:
lowercase_ : Any = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def __magic_name__ ( lowercase , lowercase ) -> Sequence[Node | None]:
"""simple docstring"""
lowercase_ : list[Any] = []
def populate_output(lowercase , lowercase ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(lowercase , lowercase )
return output
def __magic_name__ ( lowercase , lowercase ) -> Sequence[Node | None]:
"""simple docstring"""
lowercase_ : list[Any] = []
def populate_output(lowercase , lowercase ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(lowercase , lowercase )
return output
def __magic_name__ ( lowercase ) -> Sequence[Node | None] | list[Any]:
"""simple docstring"""
if root is None:
return []
lowercase_ : list[Sequence[Node | None]] = []
lowercase_ : List[Any] = 0
lowercase_ : List[str] = height(lowercase )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(lowercase , lowercase ) )
lowercase_ : Dict = 1
else:
output.append(get_nodes_from_right_to_left(lowercase , lowercase ) )
lowercase_ : Any = 0
return output
def __magic_name__ ( ) -> None: # Main function for testing.
"""simple docstring"""
lowercase_ : Optional[Any] = make_tree()
print(f"""In-order Traversal: {inorder(lowercase )}""" )
print(f"""Pre-order Traversal: {preorder(lowercase )}""" )
print(f"""Post-order Traversal: {postorder(lowercase )}""" , """\n""" )
print(f"""Height of Tree: {height(lowercase )}""" , """\n""" )
print("""Complete Level Order Traversal: """ )
print(level_order(lowercase ) , """\n""" )
print("""Level-wise order Traversal: """ )
for level in range(1 , height(lowercase ) + 1 ):
print(f"""Level {level}:""" , get_nodes_from_left_to_right(lowercase , level=lowercase ) )
print("""\nZigZag order Traversal: """ )
print(zigzag(lowercase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 436 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
UpperCAmelCase_ = {
"""vocab_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
},
}
UpperCAmelCase_ = {
"""allenai/longformer-base-4096""": 4_096,
"""allenai/longformer-large-4096""": 4_096,
"""allenai/longformer-large-4096-finetuned-triviaqa""": 4_096,
"""allenai/longformer-base-4096-extra.pos.embd.only""": 4_096,
"""allenai/longformer-large-4096-extra.pos.embd.only""": 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def __magic_name__ ( ) -> Union[str, Any]:
"""simple docstring"""
lowercase_ : Optional[int] = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
lowercase_ : Optional[int] = bs[:]
lowercase_ : Optional[int] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(lowercase )
cs.append(2**8 + n )
n += 1
lowercase_ : int = [chr(lowercase ) for n in cs]
return dict(zip(lowercase , lowercase ) )
def __magic_name__ ( lowercase ) -> Optional[int]:
"""simple docstring"""
lowercase_ : str = set()
lowercase_ : str = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowercase_ : List[str] = char
return pairs
class UpperCamelCase__ ( lowerCamelCase__ ):
'''simple docstring'''
__a : Union[str, Any] = VOCAB_FILES_NAMES
__a : Dict = PRETRAINED_VOCAB_FILES_MAP
__a : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a : Optional[Any] = ["""input_ids""", """attention_mask"""]
def __init__( self, snake_case__, snake_case__, snake_case__="replace", snake_case__="<s>", snake_case__="</s>", snake_case__="</s>", snake_case__="<s>", snake_case__="<unk>", snake_case__="<pad>", snake_case__="<mask>", snake_case__=False, **snake_case__, ) -> Optional[Any]:
"""simple docstring"""
lowercase_ : Dict = AddedToken(snake_case__, lstrip=snake_case__, rstrip=snake_case__ ) if isinstance(snake_case__, snake_case__ ) else bos_token
lowercase_ : List[Any] = AddedToken(snake_case__, lstrip=snake_case__, rstrip=snake_case__ ) if isinstance(snake_case__, snake_case__ ) else eos_token
lowercase_ : Dict = AddedToken(snake_case__, lstrip=snake_case__, rstrip=snake_case__ ) if isinstance(snake_case__, snake_case__ ) else sep_token
lowercase_ : Union[str, Any] = AddedToken(snake_case__, lstrip=snake_case__, rstrip=snake_case__ ) if isinstance(snake_case__, snake_case__ ) else cls_token
lowercase_ : List[Any] = AddedToken(snake_case__, lstrip=snake_case__, rstrip=snake_case__ ) if isinstance(snake_case__, snake_case__ ) else unk_token
lowercase_ : List[str] = AddedToken(snake_case__, lstrip=snake_case__, rstrip=snake_case__ ) if isinstance(snake_case__, snake_case__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowercase_ : Tuple = AddedToken(snake_case__, lstrip=snake_case__, rstrip=snake_case__ ) if isinstance(snake_case__, snake_case__ ) else mask_token
super().__init__(
errors=snake_case__, bos_token=snake_case__, eos_token=snake_case__, unk_token=snake_case__, sep_token=snake_case__, cls_token=snake_case__, pad_token=snake_case__, mask_token=snake_case__, add_prefix_space=snake_case__, **snake_case__, )
with open(snake_case__, encoding="""utf-8""" ) as vocab_handle:
lowercase_ : Optional[Any] = json.load(snake_case__ )
lowercase_ : Optional[int] = {v: k for k, v in self.encoder.items()}
lowercase_ : Optional[int] = errors # how to handle errors in decoding
lowercase_ : Dict = bytes_to_unicode()
lowercase_ : Union[str, Any] = {v: k for k, v in self.byte_encoder.items()}
with open(snake_case__, encoding="""utf-8""" ) as merges_handle:
lowercase_ : Optional[int] = merges_handle.read().split("""\n""" )[1:-1]
lowercase_ : List[Any] = [tuple(merge.split() ) for merge in bpe_merges]
lowercase_ : List[Any] = dict(zip(snake_case__, range(len(snake_case__ ) ) ) )
lowercase_ : str = {}
lowercase_ : Dict = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowercase_ : Tuple = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
def snake_case__ ( self ) -> Any:
"""simple docstring"""
return len(self.encoder )
def snake_case__ ( self ) -> Optional[Any]:
"""simple docstring"""
return dict(self.encoder, **self.added_tokens_encoder )
def snake_case__ ( self, snake_case__ ) -> Optional[Any]:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
lowercase_ : Optional[Any] = tuple(snake_case__ )
lowercase_ : Optional[Any] = get_pairs(snake_case__ )
if not pairs:
return token
while True:
lowercase_ : Optional[Any] = min(snake_case__, key=lambda snake_case__ : self.bpe_ranks.get(snake_case__, float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
lowercase_ , lowercase_ : int = bigram
lowercase_ : Tuple = []
lowercase_ : Dict = 0
while i < len(snake_case__ ):
try:
lowercase_ : List[Any] = word.index(snake_case__, snake_case__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowercase_ : str = j
if word[i] == first and i < len(snake_case__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase_ : Optional[int] = tuple(snake_case__ )
lowercase_ : Optional[int] = new_word
if len(snake_case__ ) == 1:
break
else:
lowercase_ : List[str] = get_pairs(snake_case__ )
lowercase_ : Union[str, Any] = """ """.join(snake_case__ )
lowercase_ : str = word
return word
def snake_case__ ( self, snake_case__ ) -> int:
"""simple docstring"""
lowercase_ : Tuple = []
for token in re.findall(self.pat, snake_case__ ):
lowercase_ : Any = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(snake_case__ ).split(""" """ ) )
return bpe_tokens
def snake_case__ ( self, snake_case__ ) -> Optional[int]:
"""simple docstring"""
return self.encoder.get(snake_case__, self.encoder.get(self.unk_token ) )
def snake_case__ ( self, snake_case__ ) -> Any:
"""simple docstring"""
return self.decoder.get(snake_case__ )
def snake_case__ ( self, snake_case__ ) -> Any:
"""simple docstring"""
lowercase_ : str = """""".join(snake_case__ )
lowercase_ : List[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""", errors=self.errors )
return text
def snake_case__ ( self, snake_case__, snake_case__ = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(snake_case__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase_ : Any = os.path.join(
snake_case__, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
lowercase_ : Union[str, Any] = os.path.join(
snake_case__, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(snake_case__, """w""", encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder, indent=2, sort_keys=snake_case__, ensure_ascii=snake_case__ ) + """\n""" )
lowercase_ : Optional[int] = 0
with open(snake_case__, """w""", encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda snake_case__ : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
""" Please check that the tokenizer is not corrupted!""" )
lowercase_ : Optional[Any] = token_index
writer.write(""" """.join(snake_case__ ) + """\n""" )
index += 1
return vocab_file, merge_file
def snake_case__ ( self, snake_case__, snake_case__ = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase_ : List[Any] = [self.cls_token_id]
lowercase_ : str = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def snake_case__ ( self, snake_case__, snake_case__ = None, snake_case__ = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__, token_ids_a=snake_case__, already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case__ )) + [1]
return [1] + ([0] * len(snake_case__ )) + [1, 1] + ([0] * len(snake_case__ )) + [1]
def snake_case__ ( self, snake_case__, snake_case__ = None ) -> List[int]:
"""simple docstring"""
lowercase_ : List[str] = [self.sep_token_id]
lowercase_ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def snake_case__ ( self, snake_case__, snake_case__=False, **snake_case__ ) -> Union[str, Any]:
"""simple docstring"""
lowercase_ : int = kwargs.pop("""add_prefix_space""", self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(snake_case__ ) > 0 and not text[0].isspace()):
lowercase_ : str = """ """ + text
return (text, kwargs) | 436 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
A_ = logging.get_logger(__name__)
A_ = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
A_ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
A_ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = 'whisper'
SCREAMING_SNAKE_CASE_ = ['past_key_values']
SCREAMING_SNAKE_CASE_ = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , SCREAMING_SNAKE_CASE_=51865 , SCREAMING_SNAKE_CASE_=80 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=1536 , SCREAMING_SNAKE_CASE_=1536 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=50257 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=256 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=1500 , SCREAMING_SNAKE_CASE_=448 , SCREAMING_SNAKE_CASE_=50256 , SCREAMING_SNAKE_CASE_=50256 , SCREAMING_SNAKE_CASE_=50256 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=[220, 50256] , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=256 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=0.05 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=7 , **SCREAMING_SNAKE_CASE_ , ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = vocab_size
lowerCamelCase_ = num_mel_bins
lowerCamelCase_ = d_model
lowerCamelCase_ = encoder_layers
lowerCamelCase_ = encoder_attention_heads
lowerCamelCase_ = decoder_layers
lowerCamelCase_ = decoder_attention_heads
lowerCamelCase_ = decoder_ffn_dim
lowerCamelCase_ = encoder_ffn_dim
lowerCamelCase_ = dropout
lowerCamelCase_ = attention_dropout
lowerCamelCase_ = activation_dropout
lowerCamelCase_ = activation_function
lowerCamelCase_ = init_std
lowerCamelCase_ = encoder_layerdrop
lowerCamelCase_ = decoder_layerdrop
lowerCamelCase_ = use_cache
lowerCamelCase_ = encoder_layers
lowerCamelCase_ = scale_embedding # scale factor will be sqrt(d_model) if True
lowerCamelCase_ = max_source_positions
lowerCamelCase_ = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
lowerCamelCase_ = classifier_proj_size
lowerCamelCase_ = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCamelCase_ = apply_spec_augment
lowerCamelCase_ = mask_time_prob
lowerCamelCase_ = mask_time_length
lowerCamelCase_ = mask_time_min_masks
lowerCamelCase_ = mask_feature_prob
lowerCamelCase_ = mask_feature_length
lowerCamelCase_ = mask_feature_min_masks
lowerCamelCase_ = median_filter_width
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , is_encoder_decoder=SCREAMING_SNAKE_CASE_ , decoder_start_token_id=SCREAMING_SNAKE_CASE_ , suppress_tokens=SCREAMING_SNAKE_CASE_ , begin_suppress_tokens=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
@property
def UpperCamelCase( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
lowerCamelCase_ = OrderedDict(
[
('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
] )
if self.use_past:
lowerCamelCase_ = {0: 'batch'}
else:
lowerCamelCase_ = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE_ , direction='inputs' )
return common_inputs
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = -1 , SCREAMING_SNAKE_CASE_ = -1 , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 22050 , SCREAMING_SNAKE_CASE_ = 5.0 , SCREAMING_SNAKE_CASE_ = 220 , ) -> Mapping[str, Any]:
'''simple docstring'''
lowerCamelCase_ = OrderedDict()
lowerCamelCase_ = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=SCREAMING_SNAKE_CASE_ , framework=SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , time_duration=SCREAMING_SNAKE_CASE_ , frequency=SCREAMING_SNAKE_CASE_ , )
lowerCamelCase_ = encoder_inputs['input_features'].shape[2]
lowerCamelCase_ = encoder_sequence_length // 2 if self.use_past else seq_length
lowerCamelCase_ = super().generate_dummy_inputs(
preprocessor.tokenizer , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = encoder_inputs.pop('input_features' )
lowerCamelCase_ = decoder_inputs.pop('decoder_input_ids' )
if "past_key_values" in decoder_inputs:
lowerCamelCase_ = decoder_inputs.pop('past_key_values' )
return dummy_inputs
@property
def UpperCamelCase( self ) -> float:
'''simple docstring'''
return 1E-3
| 42 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
A_ = {
"vocab_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
},
"merges_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
},
}
A_ = {
"allenai/longformer-base-4096": 4_096,
"allenai/longformer-large-4096": 4_096,
"allenai/longformer-large-4096-finetuned-triviaqa": 4_096,
"allenai/longformer-base-4096-extra.pos.embd.only": 4_096,
"allenai/longformer-large-4096-extra.pos.embd.only": 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def _UpperCamelCase ( ) -> Any:
lowerCamelCase_ = (
list(range(ord('!' ) ,ord('~' ) + 1 ) ) + list(range(ord('¡' ) ,ord('¬' ) + 1 ) ) + list(range(ord('®' ) ,ord('ÿ' ) + 1 ) )
)
lowerCamelCase_ = bs[:]
lowerCamelCase_ = 0
for b in range(2**8 ):
if b not in bs:
bs.append(__UpperCamelCase )
cs.append(2**8 + n )
n += 1
lowerCamelCase_ = [chr(__UpperCamelCase ) for n in cs]
return dict(zip(__UpperCamelCase ,__UpperCamelCase ) )
def _UpperCamelCase ( __UpperCamelCase ) -> List[str]:
lowerCamelCase_ = set()
lowerCamelCase_ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCamelCase_ = char
return pairs
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = ['input_ids', 'attention_mask']
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="replace" , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="<unk>" , SCREAMING_SNAKE_CASE_="<pad>" , SCREAMING_SNAKE_CASE_="<mask>" , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_ , ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else bos_token
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else eos_token
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else sep_token
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else cls_token
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else unk_token
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token
super().__init__(
errors=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
with open(SCREAMING_SNAKE_CASE_ , encoding='utf-8' ) as vocab_handle:
lowerCamelCase_ = json.load(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = {v: k for k, v in self.encoder.items()}
lowerCamelCase_ = errors # how to handle errors in decoding
lowerCamelCase_ = bytes_to_unicode()
lowerCamelCase_ = {v: k for k, v in self.byte_encoder.items()}
with open(SCREAMING_SNAKE_CASE_ , encoding='utf-8' ) as merges_handle:
lowerCamelCase_ = merges_handle.read().split('\n' )[1:-1]
lowerCamelCase_ = [tuple(merge.split() ) for merge in bpe_merges]
lowerCamelCase_ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
lowerCamelCase_ = {}
lowerCamelCase_ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCamelCase_ = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
return len(self.encoder )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Any:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowerCamelCase_ = tuple(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = get_pairs(SCREAMING_SNAKE_CASE_ )
if not pairs:
return token
while True:
lowerCamelCase_ = min(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE_ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCamelCase_ ,lowerCamelCase_ = bigram
lowerCamelCase_ = []
lowerCamelCase_ = 0
while i < len(SCREAMING_SNAKE_CASE_ ):
try:
lowerCamelCase_ = word.index(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCamelCase_ = j
if word[i] == first and i < len(SCREAMING_SNAKE_CASE_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCamelCase_ = tuple(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = new_word
if len(SCREAMING_SNAKE_CASE_ ) == 1:
break
else:
lowerCamelCase_ = get_pairs(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = ' '.join(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = word
return word
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> str:
'''simple docstring'''
lowerCamelCase_ = []
for token in re.findall(self.pat , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(SCREAMING_SNAKE_CASE_ ).split(' ' ) )
return bpe_tokens
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Dict:
'''simple docstring'''
return self.encoder.get(SCREAMING_SNAKE_CASE_ , self.encoder.get(self.unk_token ) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
'''simple docstring'''
return self.decoder.get(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = ''.join(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase_ = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase_ = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(SCREAMING_SNAKE_CASE_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_ ) + '\n' )
lowerCamelCase_ = 0
with open(SCREAMING_SNAKE_CASE_ , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE_ : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!' )
lowerCamelCase_ = token_index
writer.write(' '.join(SCREAMING_SNAKE_CASE_ ) + '\n' )
index += 1
return vocab_file, merge_file
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
lowerCamelCase_ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE_ , token_ids_a=SCREAMING_SNAKE_CASE_ , already_has_special_tokens=SCREAMING_SNAKE_CASE_ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
'''simple docstring'''
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_ ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(SCREAMING_SNAKE_CASE_ ) > 0 and not text[0].isspace()):
lowerCamelCase_ = ' ' + text
return (text, kwargs)
| 42 | 1 |
def _lowerCamelCase ( snake_case , snake_case ):
_lowerCAmelCase = len(snake_case )
_lowerCAmelCase = len(snake_case )
_lowerCAmelCase = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
_lowerCAmelCase = True
for i in range(snake_case ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
_lowerCAmelCase = True
if a[i].islower():
_lowerCAmelCase = True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 225 | import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase: Optional[int] = logging.get_logger(__name__)
def _lowerCamelCase ( snake_case , snake_case , snake_case ):
_lowerCAmelCase = os.path.abspath(snake_case )
logger.info(F'Converting TensorFlow checkpoint from {tf_path}' )
# Load weights from TF model
_lowerCAmelCase = tf.train.list_variables(snake_case )
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
_lowerCAmelCase = full_name.split('/' )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(F'Skipping non-model layer {full_name}' )
continue
if "optimizer" in full_name:
logger.info(F'Skipping optimization layer {full_name}' )
continue
if name[0] == "model":
# ignore initial 'model'
_lowerCAmelCase = name[1:]
# figure out how many levels deep the name is
_lowerCAmelCase = 0
for _name in name:
if _name.startswith('layer_with_weights' ):
depth += 1
else:
break
layer_depth.append(snake_case )
# read data
_lowerCAmelCase = tf.train.load_variable(snake_case , snake_case )
names.append('/'.join(snake_case ) )
arrays.append(snake_case )
logger.info(F'Read a total of {len(snake_case ):,} layers' )
# Sanity check
if len(set(snake_case ) ) != 1:
raise ValueError(F'Found layer names with different depths (layer depth {list(set(snake_case ) )})' )
_lowerCAmelCase = list(set(snake_case ) )[0]
if layer_depth != 1:
raise ValueError(
'The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP'
' heads.' )
# convert layers
logger.info('Converting weights...' )
for full_name, array in zip(snake_case , snake_case ):
_lowerCAmelCase = full_name.split('/' )
_lowerCAmelCase = model
_lowerCAmelCase = []
for i, m_name in enumerate(snake_case ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith('layer_with_weights' ):
_lowerCAmelCase = int(m_name.split('-' )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(['embeddings', 'LayerNorm'] )
_lowerCAmelCase = getattr(snake_case , 'embeddings' )
_lowerCAmelCase = getattr(snake_case , 'LayerNorm' )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(['encoder', 'layer', str(layer_num - 4 )] )
_lowerCAmelCase = getattr(snake_case , 'encoder' )
_lowerCAmelCase = getattr(snake_case , 'layer' )
_lowerCAmelCase = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(['pooler', 'dense'] )
_lowerCAmelCase = getattr(snake_case , 'pooler' )
_lowerCAmelCase = getattr(snake_case , 'dense' )
elif m_name == "embeddings":
trace.append('embeddings' )
_lowerCAmelCase = getattr(snake_case , 'embeddings' )
if layer_num == 0:
trace.append('word_embeddings' )
_lowerCAmelCase = getattr(snake_case , 'word_embeddings' )
elif layer_num == 1:
trace.append('position_embeddings' )
_lowerCAmelCase = getattr(snake_case , 'position_embeddings' )
elif layer_num == 2:
trace.append('token_type_embeddings' )
_lowerCAmelCase = getattr(snake_case , 'token_type_embeddings' )
else:
raise ValueError(F'Unknown embedding layer with name {full_name}' )
trace.append('weight' )
_lowerCAmelCase = getattr(snake_case , 'weight' )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(['attention', 'self'] )
_lowerCAmelCase = getattr(snake_case , 'attention' )
_lowerCAmelCase = getattr(snake_case , 'self' )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(['attention', 'output', 'LayerNorm'] )
_lowerCAmelCase = getattr(snake_case , 'attention' )
_lowerCAmelCase = getattr(snake_case , 'output' )
_lowerCAmelCase = getattr(snake_case , 'LayerNorm' )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(['attention', 'output', 'dense'] )
_lowerCAmelCase = getattr(snake_case , 'attention' )
_lowerCAmelCase = getattr(snake_case , 'output' )
_lowerCAmelCase = getattr(snake_case , 'dense' )
elif m_name == "_output_dense":
# output dense
trace.extend(['output', 'dense'] )
_lowerCAmelCase = getattr(snake_case , 'output' )
_lowerCAmelCase = getattr(snake_case , 'dense' )
elif m_name == "_output_layer_norm":
# output dense
trace.extend(['output', 'LayerNorm'] )
_lowerCAmelCase = getattr(snake_case , 'output' )
_lowerCAmelCase = getattr(snake_case , 'LayerNorm' )
elif m_name == "_key_dense":
# attention key
trace.append('key' )
_lowerCAmelCase = getattr(snake_case , 'key' )
elif m_name == "_query_dense":
# attention query
trace.append('query' )
_lowerCAmelCase = getattr(snake_case , 'query' )
elif m_name == "_value_dense":
# attention value
trace.append('value' )
_lowerCAmelCase = getattr(snake_case , 'value' )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(['intermediate', 'dense'] )
_lowerCAmelCase = getattr(snake_case , 'intermediate' )
_lowerCAmelCase = getattr(snake_case , 'dense' )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append('output' )
_lowerCAmelCase = getattr(snake_case , 'output' )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append('bias' )
_lowerCAmelCase = getattr(snake_case , 'bias' )
elif m_name in ["kernel", "gamma"]:
trace.append('weight' )
_lowerCAmelCase = getattr(snake_case , 'weight' )
else:
logger.warning(F'Ignored {m_name}' )
# for certain layers reshape is necessary
_lowerCAmelCase = '.'.join(snake_case )
if re.match(R'(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)' , snake_case ) or re.match(
R'(\S+)\.attention\.output\.dense\.weight' , snake_case ):
_lowerCAmelCase = array.reshape(pointer.data.shape )
if "kernel" in full_name:
_lowerCAmelCase = array.transpose()
if pointer.shape == array.shape:
_lowerCAmelCase = torch.from_numpy(snake_case )
else:
raise ValueError(
F'Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:'
F' {array.shape}' )
logger.info(F'Successfully set variable {full_name} to PyTorch layer {trace}' )
return model
def _lowerCamelCase ( snake_case , snake_case , snake_case ):
# Instantiate model
logger.info(F'Loading model based on config from {config_path}...' )
_lowerCAmelCase = BertConfig.from_json_file(snake_case )
_lowerCAmelCase = BertModel(snake_case )
# Load weights from checkpoint
logger.info(F'Loading weights from checkpoint {tf_checkpoint_path}...' )
load_tfa_weights_in_bert(snake_case , snake_case , snake_case )
# Save pytorch-model
logger.info(F'Saving PyTorch model to {pytorch_dump_path}...' )
torch.save(model.state_dict() , snake_case )
if __name__ == "__main__":
_lowercase: str = argparse.ArgumentParser()
parser.add_argument(
'''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow 2.x checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
type=str,
required=True,
help='''The config json file corresponding to the BERT model. This specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''',
type=str,
required=True,
help='''Path to the output PyTorch model (must include filename).''',
)
_lowercase: Optional[int] = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 225 | 1 |
"""simple docstring"""
import os
__lowerCAmelCase : List[str] = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1000}
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : List[str] = 0
snake_case_ : Tuple = 0
while index < len(__UpperCamelCase ) - 1:
snake_case_ : Any = SYMBOLS[numerals[index]]
snake_case_ : Dict = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : Union[str, Any] = """"""
snake_case_ : Optional[Any] = num // 1_0_0_0
numerals += m_count * "M"
num %= 1_0_0_0
snake_case_ : List[str] = num // 1_0_0
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 1_0_0
snake_case_ : List[str] = num // 1_0
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 1_0
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def __lowerCAmelCase ( __UpperCamelCase : str = "/p089_roman.txt" ):
'''simple docstring'''
snake_case_ : Optional[Any] = 0
with open(os.path.dirname(__UpperCamelCase ) + roman_numerals_filename ) as filea:
snake_case_ : Any = filea.readlines()
for line in lines:
snake_case_ : Tuple = line.strip()
snake_case_ : Optional[int] = parse_roman_numerals(__UpperCamelCase )
snake_case_ : Tuple = generate_roman_numerals(__UpperCamelCase )
savings += len(__UpperCamelCase ) - len(__UpperCamelCase )
return savings
if __name__ == "__main__":
print(F'''{solution() = }''')
| 58 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
__lowerCAmelCase : Dict = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''roformer'''
def __init__( self , _lowercase=5_0_0_0_0 , _lowercase=None , _lowercase=7_6_8 , _lowercase=1_2 , _lowercase=1_2 , _lowercase=3_0_7_2 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=1_5_3_6 , _lowercase=2 , _lowercase=0.02 , _lowercase=1E-12 , _lowercase=0 , _lowercase=False , _lowercase=True , **_lowercase , ) -> int:
'''simple docstring'''
super().__init__(pad_token_id=_lowercase , **_lowercase )
snake_case_ : str = vocab_size
snake_case_ : Any = hidden_size if embedding_size is None else embedding_size
snake_case_ : List[str] = hidden_size
snake_case_ : str = num_hidden_layers
snake_case_ : Union[str, Any] = num_attention_heads
snake_case_ : Union[str, Any] = hidden_act
snake_case_ : Optional[Any] = intermediate_size
snake_case_ : List[str] = hidden_dropout_prob
snake_case_ : str = attention_probs_dropout_prob
snake_case_ : Union[str, Any] = max_position_embeddings
snake_case_ : List[str] = type_vocab_size
snake_case_ : Tuple = initializer_range
snake_case_ : str = layer_norm_eps
snake_case_ : List[str] = rotary_value
snake_case_ : str = use_cache
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@property
def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
snake_case_ : int = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
snake_case_ : Any = {0: """batch""", 1: """sequence"""}
snake_case_ : Optional[int] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 58 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase_ : List[Any] = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Dict = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 302 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class _UpperCAmelCase :
'''simple docstring'''
@staticmethod
def lowerCamelCase_ ( *snake_case_ , **snake_case_ ):
"""simple docstring"""
pass
def UpperCAmelCase__ ( _UpperCAmelCase ):
"""simple docstring"""
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
lowerCamelCase_ : int = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
lowercase_ : Tuple = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def lowerCamelCase_ ( self , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ : List[Any] = pipeline(
'document-question-answering' , model=snake_case_ , tokenizer=snake_case_ , image_processor=snake_case_ )
A_ : List[str] = INVOICE_URL
A_ : Union[str, Any] = list(zip(*apply_tesseract(load_image(snake_case_ ) , snake_case_ , '' ) ) )
A_ : Dict = 'What is the placebo?'
A_ : Optional[Any] = [
{
'image': load_image(snake_case_ ),
'question': question,
},
{
'image': image,
'question': question,
},
{
'image': image,
'question': question,
'word_boxes': word_boxes,
},
]
return dqa_pipeline, examples
def lowerCamelCase_ ( self , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ : Union[str, Any] = dqa_pipeline(snake_case_ , top_k=2 )
self.assertEqual(
snake_case_ , [
[
{'score': ANY(snake_case_ ), 'answer': ANY(snake_case_ ), 'start': ANY(snake_case_ ), 'end': ANY(snake_case_ )},
{'score': ANY(snake_case_ ), 'answer': ANY(snake_case_ ), 'start': ANY(snake_case_ ), 'end': ANY(snake_case_ )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : Optional[Any] = pipeline('document-question-answering' , model='hf-internal-testing/tiny-random-layoutlmv2' )
A_ : Optional[Any] = INVOICE_URL
A_ : Union[str, Any] = 'How many cats are there?'
A_ : List[Any] = [
{'score': 0.00_01, 'answer': 'oy 2312/2019', 'start': 3_8, 'end': 3_9},
{'score': 0.00_01, 'answer': 'oy 2312/2019 DUE', 'start': 3_8, 'end': 4_0},
]
A_ : List[str] = dqa_pipeline(image=snake_case_ , question=snake_case_ , top_k=2 )
self.assertEqual(nested_simplify(snake_case_ , decimals=4 ) , snake_case_ )
A_ : Any = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(nested_simplify(snake_case_ , decimals=4 ) , snake_case_ )
# This image does not detect ANY text in it, meaning layoutlmv2 should fail.
# Empty answer probably
A_ : Tuple = './tests/fixtures/tests_samples/COCO/000000039769.png'
A_ : str = dqa_pipeline(image=snake_case_ , question=snake_case_ , top_k=2 )
self.assertEqual(snake_case_ , [] )
# We can optionnally pass directly the words and bounding boxes
A_ : int = './tests/fixtures/tests_samples/COCO/000000039769.png'
A_ : Optional[int] = []
A_ : Union[str, Any] = []
A_ : List[Any] = dqa_pipeline(image=snake_case_ , question=snake_case_ , words=snake_case_ , boxes=snake_case_ , top_k=2 )
self.assertEqual(snake_case_ , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : int = pipeline(
'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , )
A_ : int = INVOICE_URL
A_ : List[Any] = 'What is the invoice number?'
A_ : str = dqa_pipeline(image=snake_case_ , question=snake_case_ , top_k=2 )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
{'score': 0.99_44, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.00_09, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] , )
A_ : List[Any] = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
{'score': 0.99_44, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.00_09, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] , )
A_ : str = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
[
{'score': 0.99_44, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.00_09, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = pipeline(
'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , max_seq_len=5_0 , )
A_ : Any = INVOICE_URL
A_ : Any = 'What is the invoice number?'
A_ : int = dqa_pipeline(image=snake_case_ , question=snake_case_ , top_k=2 )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
{'score': 0.99_74, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
{'score': 0.99_48, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] , )
A_ : str = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
{'score': 0.99_74, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
{'score': 0.99_48, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] , )
A_ : str = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
[
{'score': 0.99_74, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
{'score': 0.99_48, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : Any = AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=snake_case_ )
A_ : Optional[Any] = pipeline(
'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=snake_case_ , revision='3dc6de3' , )
A_ : Any = INVOICE_URL
A_ : str = 'What is the invoice number?'
A_ : Optional[int] = dqa_pipeline(image=snake_case_ , question=snake_case_ , top_k=2 )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
{'score': 0.42_51, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.08_19, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
] , )
A_ : Optional[int] = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
{'score': 0.42_51, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.08_19, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
] , )
A_ : Any = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
[
{'score': 0.42_51, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.08_19, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
]
]
* 2 , )
A_ : List[Any] = list(zip(*apply_tesseract(load_image(snake_case_ ) , snake_case_ , '' ) ) )
# This model should also work if `image` is set to None
A_ : Dict = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
{'score': 0.42_51, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.08_19, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : Dict = AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=snake_case_ )
A_ : Any = pipeline(
'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=snake_case_ , revision='3dc6de3' , max_seq_len=5_0 , )
A_ : str = INVOICE_URL
A_ : List[str] = 'What is the invoice number?'
A_ : Tuple = dqa_pipeline(image=snake_case_ , question=snake_case_ , top_k=2 )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
{'score': 0.99_99, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.99_98, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] , )
A_ : Dict = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
[
{'score': 0.99_99, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.99_98, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
]
]
* 2 , )
A_ : Dict = list(zip(*apply_tesseract(load_image(snake_case_ ) , snake_case_ , '' ) ) )
# This model should also work if `image` is set to None
A_ : Dict = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
{'score': 0.99_99, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.99_98, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] , )
@slow
@require_torch
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = pipeline(
'document-question-answering' , model='naver-clova-ix/donut-base-finetuned-docvqa' , tokenizer=AutoTokenizer.from_pretrained('naver-clova-ix/donut-base-finetuned-docvqa' ) , feature_extractor='naver-clova-ix/donut-base-finetuned-docvqa' , )
A_ : List[Any] = INVOICE_URL
A_ : List[str] = 'What is the invoice number?'
A_ : List[Any] = dqa_pipeline(image=snake_case_ , question=snake_case_ , top_k=2 )
self.assertEqual(nested_simplify(snake_case_ , decimals=4 ) , [{'answer': 'us-001'}] )
@require_tf
@unittest.skip('Document question answering not implemented in TF' )
def lowerCamelCase_ ( self ):
"""simple docstring"""
pass | 302 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = '''▁'''
lowerCAmelCase__ = {'''vocab_file''': '''sentencepiece.bpe.model'''}
lowerCAmelCase__ = {
'''vocab_file''': {
'''facebook/mbart-large-50-one-to-many-mmt''': (
'''https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model'''
),
}
}
lowerCAmelCase__ = {
'''facebook/mbart-large-50-one-to-many-mmt''': 1024,
}
# fmt: off
lowerCAmelCase__ = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''', '''af_ZA''', '''az_AZ''', '''bn_IN''', '''fa_IR''', '''he_IL''', '''hr_HR''', '''id_ID''', '''ka_GE''', '''km_KH''', '''mk_MK''', '''ml_IN''', '''mn_MN''', '''mr_IN''', '''pl_PL''', '''ps_AF''', '''pt_XX''', '''sv_SE''', '''sw_KE''', '''ta_IN''', '''te_IN''', '''th_TH''', '''tl_XX''', '''uk_UA''', '''ur_PK''', '''xh_ZA''', '''gl_ES''', '''sl_SI''']
class __snake_case ( _lowercase):
snake_case__ : Optional[Any] = VOCAB_FILES_NAMES
snake_case__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
snake_case__ : Optional[Any] = ["input_ids", "attention_mask"]
snake_case__ : List[int] = []
snake_case__ : List[int] = []
def __init__( self : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Dict=None , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : Union[str, Any]="</s>" , __lowerCAmelCase : str="</s>" , __lowerCAmelCase : Union[str, Any]="<s>" , __lowerCAmelCase : Any="<unk>" , __lowerCAmelCase : List[Any]="<pad>" , __lowerCAmelCase : List[str]="<mask>" , __lowerCAmelCase : Optional[Dict[str, Any]] = None , **__lowerCAmelCase : Optional[int] , ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else mask_token
_lowerCamelCase : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCamelCase : List[Any] = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=__lowerCAmelCase , tgt_lang=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , )
_lowerCamelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__lowerCAmelCase ) )
_lowerCamelCase : List[str] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
_lowerCamelCase : Optional[int] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_lowerCamelCase : Tuple = 1
_lowerCamelCase : List[Any] = len(self.sp_model )
_lowerCamelCase : Any = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__lowerCAmelCase )
}
_lowerCamelCase : List[str] = {v: k for k, v in self.lang_code_to_id.items()}
_lowerCamelCase : Tuple = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
_lowerCamelCase : Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
_lowerCamelCase : Tuple = src_lang if src_lang is not None else '''en_XX'''
_lowerCamelCase : Optional[int] = self.lang_code_to_id[self._src_lang]
_lowerCamelCase : List[Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : List[Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : List[str] = self.__dict__.copy()
_lowerCamelCase : List[Any] = None
return state
def __setstate__( self : Optional[int] , __lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : int = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_lowerCamelCase : List[Any] = {}
_lowerCamelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : str = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : str ):
"""simple docstring"""
return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : str ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_lowerCamelCase : Dict = self.sp_model.PieceToId(__lowerCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : int ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Dict = []
_lowerCamelCase : Any = ''''''
_lowerCamelCase : str = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__lowerCAmelCase ) + token
_lowerCamelCase : Dict = True
_lowerCamelCase : str = []
else:
current_sub_tokens.append(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = False
out_string += self.sp_model.decode(__lowerCAmelCase )
return out_string.strip()
def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(__lowerCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_lowerCamelCase : str = os.path.join(
__lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCAmelCase , '''wb''' ) as fi:
_lowerCamelCase : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(__lowerCAmelCase )
return (out_vocab_file,)
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None , __lowerCAmelCase : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase )
_lowerCamelCase : int = [1] * len(self.prefix_tokens )
_lowerCamelCase : Optional[int] = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__lowerCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(__lowerCAmelCase )) + ([0] * len(__lowerCAmelCase )) + suffix_ones
def SCREAMING_SNAKE_CASE ( self : List[str] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] , __lowerCAmelCase : Optional[str] , **__lowerCAmelCase : Tuple ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
_lowerCamelCase : int = src_lang
_lowerCamelCase : Tuple = self(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
_lowerCamelCase : int = self.convert_tokens_to_ids(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = tgt_lang_id
return inputs
def SCREAMING_SNAKE_CASE ( self : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : str = "en_XX" , __lowerCAmelCase : Optional[List[str]] = None , __lowerCAmelCase : str = "ro_RO" , **__lowerCAmelCase : List[str] , ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = src_lang
_lowerCamelCase : Union[str, Any] = tgt_lang
return super().prepare_seqaseq_batch(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : str = self.lang_code_to_id[src_lang]
_lowerCamelCase : Tuple = [self.cur_lang_code_id]
_lowerCamelCase : str = [self.eos_token_id]
def SCREAMING_SNAKE_CASE ( self : List[str] , __lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = self.lang_code_to_id[tgt_lang]
_lowerCamelCase : List[Any] = [self.cur_lang_code_id]
_lowerCamelCase : Any = [self.eos_token_id]
| 83 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
__lowerCamelCase = get_activation('gelu' )
self.assertTrue(torch.allclose(gelu_python(lowerCamelCase__ ) , torch_builtin(lowerCamelCase__ ) ) )
self.assertFalse(torch.allclose(gelu_python(lowerCamelCase__ ) , gelu_new(lowerCamelCase__ ) ) )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
__lowerCamelCase = get_activation('gelu' )
__lowerCamelCase = get_activation('gelu_10' )
__lowerCamelCase = torch_builtin(lowerCamelCase__ )
__lowerCamelCase = geluaa(lowerCamelCase__ )
__lowerCamelCase = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
self.assertTrue(torch.max(lowerCamelCase__ ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def lowercase_ ( self ) -> Any:
'''simple docstring'''
get_activation('gelu' )
get_activation('gelu_10' )
get_activation('gelu_fast' )
get_activation('gelu_new' )
get_activation('gelu_python' )
get_activation('gelu_pytorch_tanh' )
get_activation('linear' )
get_activation('mish' )
get_activation('quick_gelu' )
get_activation('relu' )
get_activation('sigmoid' )
get_activation('silu' )
get_activation('swish' )
get_activation('tanh' )
with self.assertRaises(lowerCamelCase__ ):
get_activation('bogus' )
with self.assertRaises(lowerCamelCase__ ):
get_activation(lowerCamelCase__ )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = get_activation('gelu' )
__lowerCamelCase = 1
__lowerCamelCase = get_activation('gelu' )
self.assertEqual(acta.a , 1 )
with self.assertRaises(lowerCamelCase__ ):
__lowerCamelCase = acta.a
| 469 | 0 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = 3 ) -> qiskit.result.counts.Counts:
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise TypeError('number of qubits must be a integer.' )
if number_of_qubits <= 0:
raise ValueError('number of qubits must be > 0.' )
if math.floor(SCREAMING_SNAKE_CASE_ ) != number_of_qubits:
raise ValueError('number of qubits must be exact integer.' )
if number_of_qubits > 10:
raise ValueError('number of qubits too large to simulate(>10).' )
lowerCAmelCase__ : Any = QuantumRegister(SCREAMING_SNAKE_CASE_ , 'qr' )
lowerCAmelCase__ : List[Any] = ClassicalRegister(SCREAMING_SNAKE_CASE_ , 'cr' )
lowerCAmelCase__ : Optional[int] = QuantumCircuit(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[Any] = number_of_qubits
for i in range(SCREAMING_SNAKE_CASE_ ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(SCREAMING_SNAKE_CASE_ ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(SCREAMING_SNAKE_CASE_ , number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# simulate with 10000 shots
lowerCAmelCase__ : str = Aer.get_backend('qasm_simulator' )
lowerCAmelCase__ : List[Any] = execute(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , shots=10_000 )
return job.result().get_counts(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
print(
F"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
) | 69 |
import numpy
class A__ :
def __init__( self : Tuple , a : numpy.ndarray , a : numpy.ndarray ):
'''simple docstring'''
lowerCAmelCase__ : int = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
lowerCAmelCase__ : Dict = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
lowerCAmelCase__ : List[str] = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
lowerCAmelCase__ : List[Any] = numpy.random.rand(3 , 1 )
# Real output values provided.
lowerCAmelCase__ : str = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
lowerCAmelCase__ : List[Any] = numpy.zeros(output_array.shape )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : str = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
lowerCAmelCase__ : Tuple = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
lowerCAmelCase__ : Any = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
lowerCAmelCase__ : Optional[Any] = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
lowerCAmelCase__ : int = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def _lowerCamelCase ( self : Optional[int] , a : numpy.ndarray , a : int , a : bool ):
'''simple docstring'''
for iteration in range(1 , iterations + 1 ):
lowerCAmelCase__ : Any = self.feedforward()
self.back_propagation()
if give_loss:
lowerCAmelCase__ : Tuple = numpy.mean(numpy.square(output - self.feedforward() ) )
print(f'''Iteration {iteration} Loss: {loss}''' )
def _lowerCamelCase ( self : Optional[Any] , a : numpy.ndarray ):
'''simple docstring'''
lowerCAmelCase__ : Dict = input_arr
lowerCAmelCase__ : Any = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
lowerCAmelCase__ : int = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
lowerCAmelCase__ : List[Any] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> numpy.ndarray:
return 1 / (1 + numpy.exp(-value ))
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> numpy.ndarray:
return (value) * (1 - (value))
def lowerCAmelCase__ ( ) -> int:
lowerCAmelCase__ : Any = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
lowerCAmelCase__ : int = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
lowerCAmelCase__ : List[str] = TwoHiddenLayerNeuralNetwork(
input_array=SCREAMING_SNAKE_CASE_ , output_array=SCREAMING_SNAKE_CASE_ )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=SCREAMING_SNAKE_CASE_ , iterations=10 , give_loss=SCREAMING_SNAKE_CASE_ )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example() | 69 | 1 |
'''simple docstring'''
from __future__ import annotations
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
# Checks if the entire collection has been sorted
if len(SCREAMING_SNAKE_CASE__ ) <= 1 or n <= 1:
return
insert_next(SCREAMING_SNAKE_CASE__ , n - 1 )
rec_insertion_sort(SCREAMING_SNAKE_CASE__ , n - 1 )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
# Checks order between adjacent elements
if index >= len(SCREAMING_SNAKE_CASE__ ) or collection[index - 1] <= collection[index]:
return
# Swaps adjacent elements since they are not in ascending order
__a , __a : Tuple = (
collection[index],
collection[index - 1],
)
insert_next(SCREAMING_SNAKE_CASE__ , index + 1 )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = input("Enter integers separated by spaces: ")
SCREAMING_SNAKE_CASE_ = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
| 597 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=lowerCamelCase__ )
class lowerCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
_A = field(default='summarization' , metadata={'include_in_asdict_even_if_is_default': True} )
_A = Features({'text': Value('string' )} )
_A = Features({'summary': Value('string' )} )
_A = "text"
_A = "summary"
@property
def __magic_name__ ( self ) -> Dict[str, str]:
return {self.text_column: "text", self.summary_column: "summary"}
| 597 | 1 |
"""simple docstring"""
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class _lowercase ( nn.Module ):
def __init__( self : int , a : int , a : int , a : int , a : Tuple=0.0 , a : Optional[int] = None , a : str = "geglu" , a : Optional[int] = None , a : bool = False , a : bool = False , a : bool = False , a : bool = False , a : bool = True , a : str = "layer_norm" , a : bool = False , ):
"""simple docstring"""
super().__init__()
__snake_case : str =only_cross_attention
__snake_case : List[Any] =(num_embeds_ada_norm is not None) and norm_type == '''ada_norm_zero'''
__snake_case : Tuple =(num_embeds_ada_norm is not None) and norm_type == '''ada_norm'''
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
f''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
__snake_case : Dict =AdaLayerNorm(a , a )
elif self.use_ada_layer_norm_zero:
__snake_case : Union[str, Any] =AdaLayerNormZero(a , a )
else:
__snake_case : Any =nn.LayerNorm(a , elementwise_affine=a )
__snake_case : Union[str, Any] =Attention(
query_dim=a , heads=a , dim_head=a , dropout=a , bias=a , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=a , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
__snake_case : Dict =(
AdaLayerNorm(a , a )
if self.use_ada_layer_norm
else nn.LayerNorm(a , elementwise_affine=a )
)
__snake_case : Optional[Any] =Attention(
query_dim=a , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=a , dim_head=a , dropout=a , bias=a , upcast_attention=a , ) # is self-attn if encoder_hidden_states is none
else:
__snake_case : Tuple =None
__snake_case : List[Any] =None
# 3. Feed-forward
__snake_case : str =nn.LayerNorm(a , elementwise_affine=a )
__snake_case : int =FeedForward(a , dropout=a , activation_fn=a , final_dropout=a )
# let chunk size default to None
__snake_case : str =None
__snake_case : Optional[int] =0
def _UpperCamelCase ( self : str , a : Optional[int] , a : int ):
"""simple docstring"""
__snake_case : List[Any] =chunk_size
__snake_case : Tuple =dim
def _UpperCamelCase ( self : List[Any] , a : torch.FloatTensor , a : Optional[torch.FloatTensor] = None , a : Optional[torch.FloatTensor] = None , a : Optional[torch.FloatTensor] = None , a : Optional[torch.LongTensor] = None , a : Dict[str, Any] = None , a : Optional[torch.LongTensor] = None , ):
"""simple docstring"""
if self.use_ada_layer_norm:
__snake_case : int =self.norma(a , a )
elif self.use_ada_layer_norm_zero:
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : List[str] =self.norma(
a , a , a , hidden_dtype=hidden_states.dtype )
else:
__snake_case : List[str] =self.norma(a )
__snake_case : int =cross_attention_kwargs if cross_attention_kwargs is not None else {}
__snake_case : Any =self.attna(
a , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=a , **a , )
if self.use_ada_layer_norm_zero:
__snake_case : List[str] =gate_msa.unsqueeze(1 ) * attn_output
__snake_case : List[Any] =attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
__snake_case : Tuple =(
self.norma(a , a ) if self.use_ada_layer_norm else self.norma(a )
)
__snake_case : Optional[int] =self.attna(
a , encoder_hidden_states=a , attention_mask=a , **a , )
__snake_case : Any =attn_output + hidden_states
# 3. Feed-forward
__snake_case : int =self.norma(a )
if self.use_ada_layer_norm_zero:
__snake_case : Any =norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )
__snake_case : Dict =norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
__snake_case : Optional[int] =torch.cat(
[self.ff(a ) for hid_slice in norm_hidden_states.chunk(a , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
__snake_case : str =self.ff(a )
if self.use_ada_layer_norm_zero:
__snake_case : Tuple =gate_mlp.unsqueeze(1 ) * ff_output
__snake_case : List[Any] =ff_output + hidden_states
return hidden_states
class _lowercase ( nn.Module ):
def __init__( self : List[Any] , a : int , a : Optional[int] = None , a : int = 4 , a : float = 0.0 , a : str = "geglu" , a : bool = False , ):
"""simple docstring"""
super().__init__()
__snake_case : Any =int(dim * mult )
__snake_case : int =dim_out if dim_out is not None else dim
if activation_fn == "gelu":
__snake_case : Optional[Any] =GELU(a , a )
if activation_fn == "gelu-approximate":
__snake_case : Any =GELU(a , a , approximate='''tanh''' )
elif activation_fn == "geglu":
__snake_case : Optional[Any] =GEGLU(a , a )
elif activation_fn == "geglu-approximate":
__snake_case : str =ApproximateGELU(a , a )
__snake_case : List[Any] =nn.ModuleList([] )
# project in
self.net.append(a )
# project dropout
self.net.append(nn.Dropout(a ) )
# project out
self.net.append(nn.Linear(a , a ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(a ) )
def _UpperCamelCase ( self : List[str] , a : Optional[Any] ):
"""simple docstring"""
for module in self.net:
__snake_case : Dict =module(a )
return hidden_states
class _lowercase ( nn.Module ):
def __init__( self : Any , a : int , a : int , a : str = "none" ):
"""simple docstring"""
super().__init__()
__snake_case : Any =nn.Linear(a , a )
__snake_case : Dict =approximate
def _UpperCamelCase ( self : Optional[int] , a : Dict ):
"""simple docstring"""
if gate.device.type != "mps":
return F.gelu(a , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
def _UpperCamelCase ( self : Optional[Any] , a : Any ):
"""simple docstring"""
__snake_case : Optional[int] =self.proj(a )
__snake_case : int =self.gelu(a )
return hidden_states
class _lowercase ( nn.Module ):
def __init__( self : Union[str, Any] , a : int , a : int ):
"""simple docstring"""
super().__init__()
__snake_case : List[str] =nn.Linear(a , dim_out * 2 )
def _UpperCamelCase ( self : Optional[Any] , a : Dict ):
"""simple docstring"""
if gate.device.type != "mps":
return F.gelu(a )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def _UpperCamelCase ( self : List[Any] , a : Optional[Any] ):
"""simple docstring"""
__snake_case , __snake_case : Optional[int] =self.proj(a ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(a )
class _lowercase ( nn.Module ):
def __init__( self : Union[str, Any] , a : int , a : int ):
"""simple docstring"""
super().__init__()
__snake_case : str =nn.Linear(a , a )
def _UpperCamelCase ( self : Optional[Any] , a : List[str] ):
"""simple docstring"""
__snake_case : Union[str, Any] =self.proj(a )
return x * torch.sigmoid(1.7_0_2 * x )
class _lowercase ( nn.Module ):
def __init__( self : Tuple , a : str , a : Any ):
"""simple docstring"""
super().__init__()
__snake_case : int =nn.Embedding(a , a )
__snake_case : List[Any] =nn.SiLU()
__snake_case : Any =nn.Linear(a , embedding_dim * 2 )
__snake_case : Any =nn.LayerNorm(a , elementwise_affine=a )
def _UpperCamelCase ( self : Union[str, Any] , a : Optional[int] , a : str ):
"""simple docstring"""
__snake_case : str =self.linear(self.silu(self.emb(a ) ) )
__snake_case , __snake_case : List[Any] =torch.chunk(a , 2 )
__snake_case : Tuple =self.norm(a ) * (1 + scale) + shift
return x
class _lowercase ( nn.Module ):
def __init__( self : Optional[Any] , a : str , a : Optional[int] ):
"""simple docstring"""
super().__init__()
__snake_case : Union[str, Any] =CombinedTimestepLabelEmbeddings(a , a )
__snake_case : int =nn.SiLU()
__snake_case : Dict =nn.Linear(a , 6 * embedding_dim , bias=a )
__snake_case : Union[str, Any] =nn.LayerNorm(a , elementwise_affine=a , eps=1e-6 )
def _UpperCamelCase ( self : Union[str, Any] , a : List[Any] , a : int , a : Tuple , a : int=None ):
"""simple docstring"""
__snake_case : Union[str, Any] =self.linear(self.silu(self.emb(a , a , hidden_dtype=a ) ) )
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Union[str, Any] =emb.chunk(6 , dim=1 )
__snake_case : Tuple =self.norm(a ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class _lowercase ( nn.Module ):
def __init__( self : str , a : int , a : int , a : int , a : Optional[str] = None , a : float = 1e-5 ):
"""simple docstring"""
super().__init__()
__snake_case : Union[str, Any] =num_groups
__snake_case : Dict =eps
if act_fn is None:
__snake_case : Union[str, Any] =None
else:
__snake_case : str =get_activation(a )
__snake_case : Any =nn.Linear(a , out_dim * 2 )
def _UpperCamelCase ( self : str , a : Optional[Any] , a : List[str] ):
"""simple docstring"""
if self.act:
__snake_case : Dict =self.act(a )
__snake_case : List[str] =self.linear(a )
__snake_case : List[Any] =emb[:, :, None, None]
__snake_case , __snake_case : Dict =emb.chunk(2 , dim=1 )
__snake_case : Any =F.group_norm(a , self.num_groups , eps=self.eps )
__snake_case : str =x * (1 + scale) + shift
return x
| 497 |
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
UpperCamelCase_ : List[str] = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""enhancement""",
"""new pipeline/model""",
"""new scheduler""",
"""wip""",
]
def __lowercase ( ) -> Optional[Any]:
__snake_case : List[Any] =Github(os.environ['''GITHUB_TOKEN'''] )
__snake_case : int =g.get_repo('''huggingface/diffusers''' )
__snake_case : Any =repo.get_issues(state='''open''' )
for issue in open_issues:
__snake_case : Dict =sorted(issue.get_comments() , key=lambda a : i.created_at , reverse=a )
__snake_case : Dict =comments[0] if len(a ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
| 497 | 1 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase: Optional[Any] = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model')
@require_sentencepiece
@require_tokenizers
class a__( lowerCamelCase__ , unittest.TestCase ):
lowercase__ = SpeechTaTokenizer
lowercase__ = False
lowercase__ = True
def lowercase_ ( self : Optional[int] ):
super().setUp()
# We have a SentencePiece fixture for testing
a : Dict = SpeechTaTokenizer(__snake_case )
a : Any = AddedToken('<mask>' , lstrip=__snake_case , rstrip=__snake_case )
a : str = mask_token
tokenizer.add_special_tokens({'mask_token': mask_token} )
tokenizer.add_tokens(['<ctc_blank>'] )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase_ ( self : Tuple , __snake_case : int ):
a : int = 'this is a test'
a : List[str] = 'this is a test'
return input_text, output_text
def lowercase_ ( self : Optional[Any] , __snake_case : List[Any] , __snake_case : Dict=False , __snake_case : Optional[int]=20 , __snake_case : Tuple=5 ):
a , a : Dict = self.get_input_output_texts(__snake_case )
a : str = tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
a : Optional[Any] = tokenizer.decode(__snake_case , clean_up_tokenization_spaces=__snake_case )
return text, ids
def lowercase_ ( self : Tuple ):
a : Any = '<pad>'
a : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case ) , __snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case ) , __snake_case )
def lowercase_ ( self : List[Any] ):
a : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-4] , 'œ' )
self.assertEqual(vocab_keys[-2] , '<mask>' )
self.assertEqual(vocab_keys[-1] , '<ctc_blank>' )
self.assertEqual(len(__snake_case ) , 81 )
def lowercase_ ( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def lowercase_ ( self : Optional[Any] ):
a : Optional[int] = self.get_tokenizers(do_lower_case=__snake_case )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
a : Any = tokenizer.vocab_size
a : Optional[Any] = len(__snake_case )
self.assertNotEqual(__snake_case , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
a : List[str] = ['aaaaa bbbbbb', 'cccccccccdddddddd']
a : Tuple = tokenizer.add_tokens(__snake_case )
a : Any = tokenizer.vocab_size
a : Tuple = len(__snake_case )
self.assertNotEqual(__snake_case , 0 )
self.assertEqual(__snake_case , __snake_case )
self.assertEqual(__snake_case , len(__snake_case ) )
self.assertEqual(__snake_case , all_size + len(__snake_case ) )
a : str = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=__snake_case )
self.assertGreaterEqual(len(__snake_case ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
a : int = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
a : int = tokenizer.add_special_tokens(__snake_case )
a : Any = tokenizer.vocab_size
a : Union[str, Any] = len(__snake_case )
self.assertNotEqual(__snake_case , 0 )
self.assertEqual(__snake_case , __snake_case )
self.assertEqual(__snake_case , len(__snake_case ) )
self.assertEqual(__snake_case , all_size_a + len(__snake_case ) )
a : Tuple = tokenizer.encode(
'>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=__snake_case )
self.assertGreaterEqual(len(__snake_case ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def lowercase_ ( self : Optional[Any] ):
pass
def lowercase_ ( self : Optional[int] ):
pass
def lowercase_ ( self : int ):
a : Optional[int] = self.get_tokenizer()
a : int = tokenizer.tokenize('This is a test' )
# fmt: off
self.assertListEqual(__snake_case , [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__snake_case ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
a : Dict = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
__snake_case , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] )
a : Any = tokenizer.convert_tokens_to_ids(__snake_case )
# fmt: off
self.assertListEqual(__snake_case , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
a : Any = tokenizer.convert_ids_to_tokens(__snake_case )
self.assertListEqual(
__snake_case , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] )
@slow
def lowercase_ ( self : Optional[int] ):
# Use custom sequence because this tokenizer does not handle numbers.
a : int = [
'Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '
'general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '
'Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '
'models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.',
'BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '
'conditioning on both left and right context in all layers.',
'The quick brown fox jumps over the lazy dog.',
]
# fmt: off
a : List[Any] = {
'input_ids': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__snake_case , model_name='microsoft/speecht5_asr' , revision='c5ef64c71905caeccde0e4462ef3f9077224c524' , sequences=__snake_case , ) | 526 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class a__( lowerCamelCase__ ):
lowercase__ = """Salesforce/blip-image-captioning-base"""
lowercase__ = (
"""This is a tool that generates a description of an image. It takes an input named `image` which should be the """
"""image to caption, and returns a text that contains the description in English."""
)
lowercase__ = """image_captioner"""
lowercase__ = AutoModelForVisionaSeq
lowercase__ = ["""image"""]
lowercase__ = ["""text"""]
def __init__( self : Tuple , *__snake_case : int , **__snake_case : List[Any] ):
requires_backends(self , ['vision'] )
super().__init__(*__snake_case , **__snake_case )
def lowercase_ ( self : Optional[int] , __snake_case : "Image" ):
return self.pre_processor(images=__snake_case , return_tensors='pt' )
def lowercase_ ( self : Optional[Any] , __snake_case : str ):
return self.model.generate(**__snake_case )
def lowercase_ ( self : Optional[Any] , __snake_case : Tuple ):
return self.pre_processor.batch_decode(__snake_case , skip_special_tokens=__snake_case )[0].strip() | 526 | 1 |
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __snake_case ( _UpperCAmelCase ):
__a = []
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight',
f'stage{idx}.patch_embed.proj.weight',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias',
f'stage{idx}.patch_embed.proj.bias',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight',
f'stage{idx}.patch_embed.norm.weight',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias',
f'stage{idx}.patch_embed.norm.bias',
) )
return embed
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = []
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_q.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_q.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_k.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_k.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_v.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_v.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight',
f'stage{idx}.blocks.{cnt}.attn.proj.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias',
f'stage{idx}.blocks.{cnt}.attn.proj.bias',
) )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc1.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc1.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc2.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc2.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', f'stage{idx}.blocks.{cnt}.norm1.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', f'stage{idx}.blocks.{cnt}.norm1.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', f'stage{idx}.blocks.{cnt}.norm2.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', f'stage{idx}.blocks.{cnt}.norm2.bias') )
return attention_weights
def __snake_case ( _UpperCAmelCase ):
__a = []
token.append((f'cvt.encoder.stages.{idx}.cls_token', '''stage2.cls_token''') )
return token
def __snake_case ( ):
__a = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = '''imagenet-1k-id2label.json'''
__a = 1000
__a = '''huggingface/label-files'''
__a = num_labels
__a = json.load(open(cached_download(hf_hub_url(_UpperCAmelCase , _UpperCAmelCase , repo_type='''dataset''' ) ) , '''r''' ) )
__a = {int(_UpperCAmelCase ): v for k, v in idalabel.items()}
__a = idalabel
__a = {v: k for k, v in idalabel.items()}
__a = __a = CvtConfig(num_labels=_UpperCAmelCase , idalabel=_UpperCAmelCase , labelaid=_UpperCAmelCase )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13":
__a = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21":
__a = [1, 4, 16]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
else:
__a = [2, 2, 20]
__a = [3, 12, 16]
__a = [192, 768, 1024]
__a = CvtForImageClassification(_UpperCAmelCase )
__a = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
__a = image_size
__a = torch.load(_UpperCAmelCase , map_location=torch.device('''cpu''' ) )
__a = OrderedDict()
__a = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
__a = list_of_state_dict + cls_token(_UpperCAmelCase )
__a = list_of_state_dict + embeddings(_UpperCAmelCase )
for cnt in range(config.depth[idx] ):
__a = list_of_state_dict + attention(_UpperCAmelCase , _UpperCAmelCase )
__a = list_of_state_dict + final()
for gg in list_of_state_dict:
print(_UpperCAmelCase )
for i in range(len(_UpperCAmelCase ) ):
__a = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(_UpperCAmelCase )
model.save_pretrained(_UpperCAmelCase )
image_processor.save_pretrained(_UpperCAmelCase )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
__snake_case :str = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=384,
type=int,
help='''Input Image Size''',
)
parser.add_argument(
'''--cvt_file_name''',
default=r'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
type=str,
help='''Input Image Size''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__snake_case :Dict = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 700 |
def __snake_case ( _UpperCAmelCase ):
__a = ''''''
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
def __snake_case ( _UpperCAmelCase ):
__a = [chr(i + 65 ) for i in range(26 )]
# Remove duplicate characters from key
__a = remove_duplicates(key.upper() )
__a = len(_UpperCAmelCase )
# First fill cipher with key characters
__a = {alphabet[i]: char for i, char in enumerate(_UpperCAmelCase )}
# Then map remaining characters in alphabet to
# the alphabet from the beginning
for i in range(len(_UpperCAmelCase ) , 26 ):
__a = alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
__a = alphabet[i - offset]
__a = char
return cipher_alphabet
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
return "".join(cipher_map.get(_UpperCAmelCase , _UpperCAmelCase ) for ch in message.upper() )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = {v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(_UpperCAmelCase , _UpperCAmelCase ) for ch in message.upper() )
def __snake_case ( ):
__a = input('''Enter message to encode or decode: ''' ).strip()
__a = input('''Enter keyword: ''' ).strip()
__a = input('''Encipher or decipher? E/D:''' ).strip()[0].lower()
try:
__a = {'''e''': encipher, '''d''': decipher}[option]
except KeyError:
raise KeyError('''invalid input option''' )
__a = create_cipher_map(_UpperCAmelCase )
print(func(_UpperCAmelCase , _UpperCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 60 | 0 |
'''simple docstring'''
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(A, 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(A, 'num_attention_heads' ) )
class _a :
'''simple docstring'''
def __init__( self, A, A=13, A=64, A=3, A=3, A=2, A=1, A=16, A=[128, 256, 384], A=[4, 6, 8], A=[2, 3, 4], A=[16, 16, 16], A=0, A=[2, 2, 2], A=[2, 2, 2], A=0.02, A=True, A=True, A=2, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = parent
SCREAMING_SNAKE_CASE : Optional[Any] = batch_size
SCREAMING_SNAKE_CASE : Optional[int] = image_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_channels
SCREAMING_SNAKE_CASE : List[Any] = kernel_size
SCREAMING_SNAKE_CASE : Tuple = stride
SCREAMING_SNAKE_CASE : Optional[int] = padding
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_sizes
SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[Any] = depths
SCREAMING_SNAKE_CASE : Tuple = key_dim
SCREAMING_SNAKE_CASE : List[Any] = drop_path_rate
SCREAMING_SNAKE_CASE : Any = patch_size
SCREAMING_SNAKE_CASE : Any = attention_ratio
SCREAMING_SNAKE_CASE : List[str] = mlp_ratio
SCREAMING_SNAKE_CASE : int = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = [
['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
SCREAMING_SNAKE_CASE : str = is_training
SCREAMING_SNAKE_CASE : Optional[int] = use_labels
SCREAMING_SNAKE_CASE : Optional[Any] = num_labels
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Dict = None
if self.use_labels:
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size], self.num_labels )
SCREAMING_SNAKE_CASE : str = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self ):
'''simple docstring'''
return LevitConfig(
image_size=self.image_size, num_channels=self.num_channels, kernel_size=self.kernel_size, stride=self.stride, padding=self.padding, patch_size=self.patch_size, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads, depths=self.depths, key_dim=self.key_dim, drop_path_rate=self.drop_path_rate, mlp_ratio=self.mlp_ratio, attention_ratio=self.attention_ratio, initializer_range=self.initializer_range, down_ops=self.down_ops, )
def UpperCamelCase_ ( self, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = LevitModel(config=A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(A )
SCREAMING_SNAKE_CASE : int = (self.image_size, self.image_size)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = image_size[0], image_size[1]
for _ in range(4 ):
SCREAMING_SNAKE_CASE : List[Any] = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
SCREAMING_SNAKE_CASE : int = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]), )
def UpperCamelCase_ ( self, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.num_labels
SCREAMING_SNAKE_CASE : Optional[int] = LevitForImageClassification(A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : Tuple = model(A, labels=A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = config_and_inputs
SCREAMING_SNAKE_CASE : Optional[int] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : Dict = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
A : Optional[int] = (
{
'''feature-extraction''': LevitModel,
'''image-classification''': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
A : Optional[Any] = False
A : List[Any] = False
A : str = False
A : Dict = False
A : Dict = False
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = LevitModelTester(self )
SCREAMING_SNAKE_CASE : Dict = ConfigTester(self, config_class=A, has_text_modality=A, hidden_size=37 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase_ ( self ):
'''simple docstring'''
return
@unittest.skip(reason='Levit does not use inputs_embeds' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Levit does not support input and output embeddings' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Levit does not output attentions' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(A )
SCREAMING_SNAKE_CASE : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Union[str, Any] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1], A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
def check_hidden_states_output(A, A, A ):
SCREAMING_SNAKE_CASE : Any = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**self._prepare_for_class(A, A ) )
SCREAMING_SNAKE_CASE : Optional[int] = outputs.hidden_states
SCREAMING_SNAKE_CASE : Dict = len(self.model_tester.depths ) + 1
self.assertEqual(len(A ), A )
SCREAMING_SNAKE_CASE : List[str] = (self.model_tester.image_size, self.model_tester.image_size)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = image_size[0], image_size[1]
for _ in range(4 ):
SCREAMING_SNAKE_CASE : List[str] = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
SCREAMING_SNAKE_CASE : str = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [
height * width,
self.model_tester.hidden_sizes[0],
], )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Tuple = True
check_hidden_states_output(A, A, A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Tuple = True
check_hidden_states_output(A, A, A )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self, A, A, A=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = super()._prepare_for_class(A, A, return_labels=A )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Dict = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(A )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
SCREAMING_SNAKE_CASE : Any = model_class(A )
model.to(A )
model.train()
SCREAMING_SNAKE_CASE : Optional[int] = self._prepare_for_class(A, A, return_labels=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(**A ).loss
loss.backward()
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE : Tuple = False
SCREAMING_SNAKE_CASE : Union[str, Any] = True
for model_class in self.all_model_classes:
if model_class in get_values(A ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(A )
model.gradient_checkpointing_enable()
model.to(A )
model.train()
SCREAMING_SNAKE_CASE : Tuple = self._prepare_for_class(A, A, return_labels=A )
SCREAMING_SNAKE_CASE : Any = model(**A ).loss
loss.backward()
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Optional[Any] = [
{'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
{'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
{'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(A ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"Testing {model_class} with {problem_type['title']}" ):
SCREAMING_SNAKE_CASE : List[str] = problem_type['title']
SCREAMING_SNAKE_CASE : Tuple = problem_type['num_labels']
SCREAMING_SNAKE_CASE : Tuple = model_class(A )
model.to(A )
model.train()
SCREAMING_SNAKE_CASE : Tuple = self._prepare_for_class(A, A, return_labels=A )
if problem_type["num_labels"] > 1:
SCREAMING_SNAKE_CASE : List[Any] = inputs['labels'].unsqueeze(1 ).repeat(1, problem_type['num_labels'] )
SCREAMING_SNAKE_CASE : Optional[int] = inputs['labels'].to(problem_type['dtype'] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=A ) as warning_list:
SCREAMING_SNAKE_CASE : Optional[Any] = model(**A ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"Something is going wrong in the regression problem: intercepted {w.message}" )
loss.backward()
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : List[str] = LevitModel.from_pretrained(A )
self.assertIsNotNone(A )
def lowercase__( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class _a ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
A )
SCREAMING_SNAKE_CASE : List[Any] = self.default_image_processor
SCREAMING_SNAKE_CASE : str = prepare_img()
SCREAMING_SNAKE_CASE : int = image_processor(images=A, return_tensors='pt' ).to(A )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = model(**A )
# verify the logits
SCREAMING_SNAKE_CASE : Any = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape, A )
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([1.04_48, -0.37_45, -1.83_17] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3], A, atol=1E-4 ) )
| 28 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
_lowerCAmelCase :Tuple = logging.get_logger(__name__)
_lowerCAmelCase :Union[str, Any] = {
'Intel/dpt-large': 'https://huggingface.co/Intel/dpt-large/resolve/main/config.json',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ ='''dpt'''
def __init__( self , A=7_6_8 , A=1_2 , A=1_2 , A=3_0_7_2 , A="gelu" , A=0.0 , A=0.0 , A=0.02 , A=1E-12 , A=3_8_4 , A=1_6 , A=3 , A=False , A=True , A=[2, 5, 8, 1_1] , A="project" , A=[4, 2, 1, 0.5] , A=[9_6, 1_9_2, 3_8_4, 7_6_8] , A=2_5_6 , A=-1 , A=False , A=True , A=0.4 , A=2_5_5 , A=0.1 , A=[1, 1_0_2_4, 2_4, 2_4] , A=[0, 1] , A=None , **A , ) -> int:
super().__init__(**A )
_UpperCAmelCase : Union[str, Any] = hidden_size
_UpperCAmelCase : Union[str, Any] = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info('''Initializing the config with a `BiT` backbone.''' )
_UpperCAmelCase : List[Any] = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
}
_UpperCAmelCase : int = BitConfig(**A )
elif isinstance(A , A ):
logger.info('''Initializing the config with a `BiT` backbone.''' )
_UpperCAmelCase : Union[str, Any] = BitConfig(**A )
elif isinstance(A , A ):
_UpperCAmelCase : Tuple = backbone_config
else:
raise ValueError(
f'backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.' )
_UpperCAmelCase : int = backbone_featmap_shape
_UpperCAmelCase : Dict = neck_ignore_stages
if readout_type != "project":
raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''' )
else:
_UpperCAmelCase : Tuple = None
_UpperCAmelCase : Any = None
_UpperCAmelCase : Any = []
_UpperCAmelCase : str = num_hidden_layers
_UpperCAmelCase : Any = num_attention_heads
_UpperCAmelCase : Dict = intermediate_size
_UpperCAmelCase : Optional[Any] = hidden_act
_UpperCAmelCase : List[str] = hidden_dropout_prob
_UpperCAmelCase : List[Any] = attention_probs_dropout_prob
_UpperCAmelCase : Union[str, Any] = initializer_range
_UpperCAmelCase : int = layer_norm_eps
_UpperCAmelCase : int = image_size
_UpperCAmelCase : int = patch_size
_UpperCAmelCase : str = num_channels
_UpperCAmelCase : Tuple = qkv_bias
_UpperCAmelCase : str = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''' )
_UpperCAmelCase : List[Any] = readout_type
_UpperCAmelCase : int = reassemble_factors
_UpperCAmelCase : int = neck_hidden_sizes
_UpperCAmelCase : Tuple = fusion_hidden_size
_UpperCAmelCase : Any = head_in_index
_UpperCAmelCase : Optional[int] = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
_UpperCAmelCase : List[str] = use_auxiliary_head
_UpperCAmelCase : int = auxiliary_loss_weight
_UpperCAmelCase : Any = semantic_loss_ignore_index
_UpperCAmelCase : List[Any] = semantic_classifier_dropout
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : Optional[Any] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
_UpperCAmelCase : Any = self.backbone_config.to_dict()
_UpperCAmelCase : List[str] = self.__class__.model_type
return output
| 506 | 0 |
'''simple docstring'''
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : str = logging.get_logger(__name__)
UpperCamelCase : List[str] = {
'kakaobrain/align-base': 'https://huggingface.co/kakaobrain/align-base/resolve/main/config.json',
}
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = 'align_text_model'
def __init__( self ,_lowerCAmelCase=3_05_22 ,_lowerCAmelCase=7_68 ,_lowerCAmelCase=12 ,_lowerCAmelCase=12 ,_lowerCAmelCase=30_72 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=5_12 ,_lowerCAmelCase=2 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=1E-12 ,_lowerCAmelCase=0 ,_lowerCAmelCase="absolute" ,_lowerCAmelCase=True ,**_lowerCAmelCase ,):
super().__init__(**_lowerCAmelCase )
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = hidden_act
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = type_vocab_size
lowerCamelCase__ = initializer_range
lowerCamelCase__ = layer_norm_eps
lowerCamelCase__ = position_embedding_type
lowerCamelCase__ = use_cache
lowerCamelCase__ = pad_token_id
@classmethod
def UpperCamelCase_ ( cls ,_lowerCAmelCase ,**_lowerCAmelCase ):
cls._set_token_in_kwargs(_lowerCAmelCase )
lowerCamelCase__ , lowerCamelCase__ = cls.get_config_dict(_lowerCAmelCase ,**_lowerCAmelCase )
# get the text config dict if we are loading from AlignConfig
if config_dict.get("""model_type""" ) == "align":
lowerCamelCase__ = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_lowerCAmelCase ,**_lowerCAmelCase )
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = 'align_vision_model'
def __init__( self ,_lowerCAmelCase = 3 ,_lowerCAmelCase = 6_00 ,_lowerCAmelCase = 2.0 ,_lowerCAmelCase = 3.1 ,_lowerCAmelCase = 8 ,_lowerCAmelCase = [3, 3, 5, 3, 5, 5, 3] ,_lowerCAmelCase = [32, 16, 24, 40, 80, 1_12, 1_92] ,_lowerCAmelCase = [16, 24, 40, 80, 1_12, 1_92, 3_20] ,_lowerCAmelCase = [] ,_lowerCAmelCase = [1, 2, 2, 2, 1, 2, 1] ,_lowerCAmelCase = [1, 2, 2, 3, 3, 4, 1] ,_lowerCAmelCase = [1, 6, 6, 6, 6, 6, 6] ,_lowerCAmelCase = 0.25 ,_lowerCAmelCase = "swish" ,_lowerCAmelCase = 25_60 ,_lowerCAmelCase = "mean" ,_lowerCAmelCase = 0.02 ,_lowerCAmelCase = 0.001 ,_lowerCAmelCase = 0.99 ,_lowerCAmelCase = 0.2 ,**_lowerCAmelCase ,):
super().__init__(**_lowerCAmelCase )
lowerCamelCase__ = num_channels
lowerCamelCase__ = image_size
lowerCamelCase__ = width_coefficient
lowerCamelCase__ = depth_coefficient
lowerCamelCase__ = depth_divisor
lowerCamelCase__ = kernel_sizes
lowerCamelCase__ = in_channels
lowerCamelCase__ = out_channels
lowerCamelCase__ = depthwise_padding
lowerCamelCase__ = strides
lowerCamelCase__ = num_block_repeats
lowerCamelCase__ = expand_ratios
lowerCamelCase__ = squeeze_expansion_ratio
lowerCamelCase__ = hidden_act
lowerCamelCase__ = hidden_dim
lowerCamelCase__ = pooling_type
lowerCamelCase__ = initializer_range
lowerCamelCase__ = batch_norm_eps
lowerCamelCase__ = batch_norm_momentum
lowerCamelCase__ = drop_connect_rate
lowerCamelCase__ = sum(_lowerCAmelCase ) * 4
@classmethod
def UpperCamelCase_ ( cls ,_lowerCAmelCase ,**_lowerCAmelCase ):
cls._set_token_in_kwargs(_lowerCAmelCase )
lowerCamelCase__ , lowerCamelCase__ = cls.get_config_dict(_lowerCAmelCase ,**_lowerCAmelCase )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get("""model_type""" ) == "align":
lowerCamelCase__ = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_lowerCAmelCase ,**_lowerCAmelCase )
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = 'align'
_UpperCamelCase = True
def __init__( self ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=6_40 ,_lowerCAmelCase=1.0 ,_lowerCAmelCase=0.02 ,**_lowerCAmelCase ,):
super().__init__(**_lowerCAmelCase )
if text_config is None:
lowerCamelCase__ = {}
logger.info("""text_config is None. Initializing the AlignTextConfig with default values.""" )
if vision_config is None:
lowerCamelCase__ = {}
logger.info("""vision_config is None. Initializing the AlignVisionConfig with default values.""" )
lowerCamelCase__ = AlignTextConfig(**_lowerCAmelCase )
lowerCamelCase__ = AlignVisionConfig(**_lowerCAmelCase )
lowerCamelCase__ = projection_dim
lowerCamelCase__ = temperature_init_value
lowerCamelCase__ = initializer_range
@classmethod
def UpperCamelCase_ ( cls ,_lowerCAmelCase ,_lowerCAmelCase ,**_lowerCAmelCase ):
return cls(text_config=text_config.to_dict() ,vision_config=vision_config.to_dict() ,**_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = copy.deepcopy(self.__dict__ )
lowerCamelCase__ = self.text_config.to_dict()
lowerCamelCase__ = self.vision_config.to_dict()
lowerCamelCase__ = self.__class__.model_type
return output
| 706 |
'''simple docstring'''
from math import factorial
UpperCamelCase : dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}
def A__ ( __lowerCAmelCase : int ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise TypeError("""Parameter number must be int""" )
if number < 0:
raise ValueError("""Parameter number must be greater than or equal to 0""" )
# Converts number in string to iterate on its digits and adds its factorial.
return sum(DIGIT_FACTORIAL[digit] for digit in str(__lowerCAmelCase ) )
def A__ ( __lowerCAmelCase : int = 60 , __lowerCAmelCase : int = 100_0000 ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise TypeError("""Parameters chain_length and number_limit must be int""" )
if chain_length <= 0 or number_limit <= 0:
raise ValueError(
"""Parameters chain_length and number_limit must be greater than 0""" )
# the counter for the chains with the exact desired length
lowerCamelCase__ = 0
# the cached sizes of the previous chains
lowerCamelCase__ = {}
for start_chain_element in range(1 , __lowerCAmelCase ):
# The temporary set will contain the elements of the chain
lowerCamelCase__ = set()
lowerCamelCase__ = 0
# Stop computing the chain when you find a cached size, a repeating item or the
# length is greater then the desired one.
lowerCamelCase__ = start_chain_element
while (
chain_element not in chain_sets_lengths
and chain_element not in chain_set
and chain_set_length <= chain_length
):
chain_set.add(__lowerCAmelCase )
chain_set_length += 1
lowerCamelCase__ = digit_factorial_sum(__lowerCAmelCase )
if chain_element in chain_sets_lengths:
chain_set_length += chain_sets_lengths[chain_element]
lowerCamelCase__ = chain_set_length
# If chain contains the exact amount of elements increase the counter
if chain_set_length == chain_length:
chains_counter += 1
return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{solution()}')
| 9 | 0 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class lowerCAmelCase_ :
@staticmethod
def UpperCamelCase_ ( *_A : Optional[int] , **_A : Optional[Any] ):
pass
@is_pipeline_test
@require_vision
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
UpperCAmelCase = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def UpperCamelCase_ ( self : Optional[Any] , _A : List[Any] , _A : Optional[Any] , _A : List[str] ):
_UpperCamelCase = pipeline(
'''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
_UpperCamelCase = [
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
]
return object_detector, examples
def UpperCamelCase_ ( self : Dict , _A : Tuple , _A : str ):
_UpperCamelCase = object_detector(examples[0] , threshold=0.0 )
_UpperCamelCase = len(_A )
self.assertGreater(_A , 0 )
self.assertEqual(
_A , [
{
'''score''': ANY(_A ),
'''label''': ANY(_A ),
'''box''': {'''xmin''': ANY(_A ), '''ymin''': ANY(_A ), '''xmax''': ANY(_A ), '''ymax''': ANY(_A )},
}
for i in range(_A )
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def UpperCamelCase_ ( self : List[Any] ):
pass
@require_torch
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = pipeline(
'''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
_UpperCamelCase = object_detector(
'''./tests/fixtures/tests_samples/COCO/000000039769.png''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=0.64 , )
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
{'''score''': 0.7235, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7218, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7184, '''label''': '''couch''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.6748, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6656, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6614, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6456, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
{'''score''': 0.642, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}},
{'''score''': 0.6419, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
] , )
_UpperCamelCase = object_detector(
[
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
[
{'''score''': 0.7235, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7218, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7184, '''label''': '''couch''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.6748, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6656, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6614, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6456, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
{'''score''': 0.642, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}},
{'''score''': 0.6419, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
]
] , )
@require_torch
@slow
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = pipeline('''zero-shot-object-detection''' )
_UpperCamelCase = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , )
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.1474, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.1208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
] , )
_UpperCamelCase = object_detector(
[
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
] , )
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
[
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.1474, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.1208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
],
[
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.1474, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.1208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
],
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def UpperCamelCase_ ( self : List[Any] ):
pass
@require_torch
@slow
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = 0.2
_UpperCamelCase = pipeline('''zero-shot-object-detection''' )
_UpperCamelCase = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=_A , )
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
] , )
@require_torch
@slow
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = 2
_UpperCamelCase = pipeline('''zero-shot-object-detection''' )
_UpperCamelCase = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , top_k=_A , )
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
] , )
| 10 | from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ :
def __init__( self : Any , _A : int , _A : int=12 , _A : int=7 , _A : Tuple=True , _A : Optional[int]=True , _A : Union[str, Any]=True , _A : str=99 , _A : str=32 , _A : int=32 , _A : Optional[Any]=2 , _A : Dict=4 , _A : int=37 , _A : List[Any]=0.1 , _A : str=0.1 , _A : Any=512 , _A : int=0.02 , _A : Optional[Any]=0 , _A : Dict=None , ):
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_mask
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = projection_dim
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = initializer_range
_UpperCamelCase = scope
_UpperCamelCase = bos_token_id
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = None
if self.use_input_mask:
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
_UpperCamelCase = input_mask.numpy()
_UpperCamelCase , _UpperCamelCase = input_mask.shape
_UpperCamelCase = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_A ):
_UpperCamelCase = 1
_UpperCamelCase = 0
_UpperCamelCase = self.get_config()
return config, input_ids, tf.convert_to_tensor(_A )
def UpperCamelCase_ ( self : str ):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def UpperCamelCase_ ( self : List[str] , _A : Tuple , _A : str , _A : Optional[Any] ):
_UpperCamelCase = TFBlipTextModel(config=_A )
_UpperCamelCase = model(_A , attention_mask=_A , training=_A )
_UpperCamelCase = model(_A , training=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
_UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = (TFBlipTextModel,) if is_tf_available() else ()
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = BlipTextModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=_A , hidden_size=37 )
def UpperCamelCase_ ( self : Dict ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def UpperCamelCase_ ( self : List[Any] ):
pass
def UpperCamelCase_ ( self : Tuple ):
pass
@unittest.skip(reason='''Blip does not use inputs_embeds''' )
def UpperCamelCase_ ( self : Dict ):
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def UpperCamelCase_ ( self : Dict ):
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def UpperCamelCase_ ( self : List[str] ):
pass
@slow
def UpperCamelCase_ ( self : Optional[int] ):
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFBlipTextModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def UpperCamelCase_ ( self : int , _A : Optional[int]=True ):
super().test_pt_tf_model_equivalence(allow_missing_keys=_A )
| 10 | 1 |
import requests
from bsa import BeautifulSoup
def lowerCAmelCase_ ( UpperCamelCase_ = "AAPL" ) -> str:
UpperCamelCase_ = F'''https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'''
UpperCamelCase_ = BeautifulSoup(requests.get(UpperCamelCase_ ).text , "html.parser" )
UpperCamelCase_ = "My(6px) Pos(r) smartphone_Mt(6px)"
return soup.find("div" , class_=class_ ).find("span" ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 712 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = '▁'
_UpperCAmelCase = {'vocab_file': 'sentencepiece.bpe.model'}
_UpperCAmelCase = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'
),
}
}
_UpperCAmelCase = {
'facebook/nllb-200-distilled-600M': 1_0_2_4,
}
# fmt: off
_UpperCAmelCase = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class _UpperCamelCase ( lowerCAmelCase_ ):
_UpperCamelCase : Dict = VOCAB_FILES_NAMES
_UpperCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Union[str, Any] = ['''input_ids''', '''attention_mask''']
_UpperCamelCase : List[int] = []
_UpperCamelCase : List[int] = []
def __init__( self: Tuple , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: List[str]="<s>" , _SCREAMING_SNAKE_CASE: Optional[int]="</s>" , _SCREAMING_SNAKE_CASE: int="</s>" , _SCREAMING_SNAKE_CASE: Union[str, Any]="<s>" , _SCREAMING_SNAKE_CASE: Any="<unk>" , _SCREAMING_SNAKE_CASE: Union[str, Any]="<pad>" , _SCREAMING_SNAKE_CASE: int="<mask>" , _SCREAMING_SNAKE_CASE: Dict=None , _SCREAMING_SNAKE_CASE: Dict=None , _SCREAMING_SNAKE_CASE: int=None , _SCREAMING_SNAKE_CASE: Optional[Dict[str, Any]] = None , _SCREAMING_SNAKE_CASE: int=None , _SCREAMING_SNAKE_CASE: Tuple=False , **_SCREAMING_SNAKE_CASE: List[str] , ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else mask_token
UpperCamelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
UpperCamelCase_ = legacy_behaviour
super().__init__(
bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , src_lang=_SCREAMING_SNAKE_CASE , tgt_lang=_SCREAMING_SNAKE_CASE , additional_special_tokens=_SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
UpperCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_SCREAMING_SNAKE_CASE ) )
UpperCamelCase_ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
UpperCamelCase_ = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCamelCase_ = 1
UpperCamelCase_ = len(self.sp_model )
UpperCamelCase_ = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_SCREAMING_SNAKE_CASE )
}
UpperCamelCase_ = {v: k for k, v in self.lang_code_to_id.items()}
UpperCamelCase_ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
UpperCamelCase_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
UpperCamelCase_ = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
UpperCamelCase_ = src_lang if src_lang is not None else "eng_Latn"
UpperCamelCase_ = self.lang_code_to_id[self._src_lang]
UpperCamelCase_ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self: Any ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = self.__dict__.copy()
UpperCamelCase_ = None
UpperCamelCase_ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self: List[Any] , _SCREAMING_SNAKE_CASE: Optional[Any] ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCamelCase_ = {}
UpperCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def lowercase ( self: Union[str, Any] ) -> Dict:
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def lowercase ( self: Union[str, Any] ) -> str:
"""simple docstring"""
return self._src_lang
@src_lang.setter
def lowercase ( self: Tuple , _SCREAMING_SNAKE_CASE: str ) -> None:
"""simple docstring"""
UpperCamelCase_ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def lowercase ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: List[int] , _SCREAMING_SNAKE_CASE: Optional[List[int]] = None , _SCREAMING_SNAKE_CASE: bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_SCREAMING_SNAKE_CASE , token_ids_a=_SCREAMING_SNAKE_CASE , already_has_special_tokens=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = [1] * len(self.prefix_tokens )
UpperCamelCase_ = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(_SCREAMING_SNAKE_CASE )) + suffix_ones
return prefix_ones + ([0] * len(_SCREAMING_SNAKE_CASE )) + ([0] * len(_SCREAMING_SNAKE_CASE )) + suffix_ones
def lowercase ( self: Tuple , _SCREAMING_SNAKE_CASE: List[int] , _SCREAMING_SNAKE_CASE: Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowercase ( self: str , _SCREAMING_SNAKE_CASE: List[int] , _SCREAMING_SNAKE_CASE: Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
UpperCamelCase_ = [self.sep_token_id]
UpperCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase ( self: Tuple , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Optional[str] , _SCREAMING_SNAKE_CASE: Optional[str] , **_SCREAMING_SNAKE_CASE: Tuple ) -> int:
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
UpperCamelCase_ = src_lang
UpperCamelCase_ = self(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = self.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = tgt_lang_id
return inputs
def lowercase ( self: Tuple ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = {self.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(_SCREAMING_SNAKE_CASE , out_type=_SCREAMING_SNAKE_CASE )
def lowercase ( self: Dict , _SCREAMING_SNAKE_CASE: str ) -> Optional[int]:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCamelCase_ = self.sp_model.PieceToId(_SCREAMING_SNAKE_CASE )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowercase ( self: int , _SCREAMING_SNAKE_CASE: Union[str, Any] ) -> Any:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowercase ( self: int , _SCREAMING_SNAKE_CASE: Optional[int] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = "".join(_SCREAMING_SNAKE_CASE ).replace(_SCREAMING_SNAKE_CASE , " " ).strip()
return out_string
def lowercase ( self: str , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCamelCase_ = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(_SCREAMING_SNAKE_CASE , "wb" ) as fi:
UpperCamelCase_ = self.sp_model.serialized_model_proto()
fi.write(_SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
def lowercase ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: str = "eng_Latn" , _SCREAMING_SNAKE_CASE: Optional[List[str]] = None , _SCREAMING_SNAKE_CASE: str = "fra_Latn" , **_SCREAMING_SNAKE_CASE: List[str] , ) -> BatchEncoding:
"""simple docstring"""
UpperCamelCase_ = src_lang
UpperCamelCase_ = tgt_lang
return super().prepare_seqaseq_batch(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def lowercase ( self: Any ) -> Optional[int]:
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def lowercase ( self: Dict ) -> Optional[int]:
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowercase ( self: List[str] , _SCREAMING_SNAKE_CASE: Any ) -> None:
"""simple docstring"""
UpperCamelCase_ = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
UpperCamelCase_ = []
UpperCamelCase_ = [self.eos_token_id, self.cur_lang_code]
else:
UpperCamelCase_ = [self.cur_lang_code]
UpperCamelCase_ = [self.eos_token_id]
def lowercase ( self: Any , _SCREAMING_SNAKE_CASE: str ) -> None:
"""simple docstring"""
UpperCamelCase_ = self.lang_code_to_id[lang]
if self.legacy_behaviour:
UpperCamelCase_ = []
UpperCamelCase_ = [self.eos_token_id, self.cur_lang_code]
else:
UpperCamelCase_ = [self.cur_lang_code]
UpperCamelCase_ = [self.eos_token_id]
| 371 | 0 |
"""simple docstring"""
_lowerCamelCase = {
"joule": 1.0,
"kilojoule": 10_00,
"megajoule": 1_00_00_00,
"gigajoule": 10_00_00_00_00,
"wattsecond": 1.0,
"watthour": 36_00,
"kilowatthour": 3_60_00_00,
"newtonmeter": 1.0,
"calorie_nutr": 41_86.8,
"kilocalorie_nutr": 4_18_68_00.00,
"electronvolt": 1.602_176_634e-19,
"britishthermalunit_it": 10_55.0_55_85,
"footpound": 1.355_818,
}
def lowerCAmelCase_ ( lowercase_ : List[str] , lowercase_ : List[Any] , lowercase_ : str ):
'''simple docstring'''
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
__SCREAMING_SNAKE_CASE : Optional[int] = (
F'''Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n'''
F'''Valid values are: {', '.join(__A )}'''
)
raise ValueError(__A )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 674 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase__ ( metaclass=UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = ['''torch''', '''scipy''']
def __init__( self , *lowercase , **lowercase ) -> int:
requires_backends(self , ["""torch""", """scipy"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> int:
requires_backends(cls , ["""torch""", """scipy"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(cls , ["""torch""", """scipy"""] )
| 601 | 0 |
"""simple docstring"""
def _a ( _snake_case ):
"""simple docstring"""
return "".join([hex(_snake_case )[2:].zfill(2 ).upper() for byte in list(_snake_case )] )
def _a ( _snake_case ):
"""simple docstring"""
if (len(_snake_case ) % 2) != 0:
raise ValueError(
"""Base16 encoded data is invalid:
Data does not have an even number of hex digits.""" )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(_snake_case ) <= set("""0123456789ABCDEF""" ):
raise ValueError(
"""Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.""" )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(_snake_case ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 701 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class lowerCamelCase__ :
def __init__( self ,A = 6 ):
UpperCAmelCase = None
UpperCAmelCase = None
self.create_linked_list(A )
def _UpperCamelCase ( self ,A ):
UpperCAmelCase = Node()
UpperCAmelCase = current_node
UpperCAmelCase = current_node
UpperCAmelCase = current_node
for _ in range(1 ,A ):
UpperCAmelCase = Node()
UpperCAmelCase = current_node
UpperCAmelCase = previous_node
UpperCAmelCase = current_node
UpperCAmelCase = self.front
UpperCAmelCase = previous_node
def _UpperCamelCase ( self ):
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def _UpperCamelCase ( self ):
self.check_can_perform_operation()
return self.front.data if self.front else None
def _UpperCamelCase ( self ,A ):
if self.rear is None:
return
self.check_is_full()
if not self.is_empty():
UpperCAmelCase = self.rear.next
if self.rear:
UpperCAmelCase = data
def _UpperCamelCase ( self ):
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
UpperCAmelCase = self.front.data
UpperCAmelCase = None
return data
UpperCAmelCase = self.front
UpperCAmelCase = old_front.next
UpperCAmelCase = old_front.data
UpperCAmelCase = None
return data
def _UpperCamelCase ( self ):
if self.is_empty():
raise Exception("""Empty Queue""" )
def _UpperCamelCase ( self ):
if self.rear and self.rear.next == self.front:
raise Exception("""Full Queue""" )
class lowerCamelCase__ :
def __init__( self ):
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
if __name__ == "__main__":
import doctest
doctest.testmod()
| 74 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __lowerCamelCase ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
a__: List[Any] = StableDiffusionXLImgaImgPipeline
a__: Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
a__: Dict = PipelineTesterMixin.required_optional_params - {'latents'}
a__: int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
a__: Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS
a__: str = IMAGE_TO_IMAGE_IMAGE_PARAMS
def UpperCAmelCase__ ( self ):
torch.manual_seed(0 )
lowerCamelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=UpperCAmelCase , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
lowerCamelCase_ = EulerDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , )
torch.manual_seed(0 )
lowerCamelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCamelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=32 , )
lowerCamelCase_ = CLIPTextModel(UpperCAmelCase )
lowerCamelCase_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=UpperCAmelCase )
lowerCamelCase_ = CLIPTextModelWithProjection(UpperCAmelCase )
lowerCamelCase_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=UpperCAmelCase )
lowerCamelCase_ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''text_encoder_2''': text_encoder_a,
'''tokenizer_2''': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase=0 ):
lowerCamelCase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase )
lowerCamelCase_ = image / 2 + 0.5
if str(UpperCAmelCase ).startswith('''mps''' ):
lowerCamelCase_ = torch.manual_seed(UpperCAmelCase )
else:
lowerCamelCase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
lowerCamelCase_ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 5.0,
'''output_type''': '''numpy''',
'''strength''': 0.7_5,
}
return inputs
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = StableDiffusionXLImgaImgPipeline(**UpperCAmelCase )
lowerCamelCase_ = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowerCamelCase_ = self.get_dummy_inputs(UpperCAmelCase )
lowerCamelCase_ = sd_pipe(**UpperCAmelCase ).images
lowerCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCamelCase_ = np.array([0.4_6_5_6, 0.4_8_4_0, 0.4_4_3_9, 0.6_6_9_8, 0.5_5_7_4, 0.4_5_2_4, 0.5_7_9_9, 0.5_9_4_3, 0.5_1_6_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase__ ( self ):
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def UpperCAmelCase__ ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def UpperCAmelCase__ ( self ):
pass
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = StableDiffusionXLImgaImgPipeline(**UpperCAmelCase )
lowerCamelCase_ = sd_pipe.to(UpperCAmelCase )
lowerCamelCase_ = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
# forward without prompt embeds
lowerCamelCase_ = self.get_dummy_inputs(UpperCAmelCase )
lowerCamelCase_ = 3 * ['''this is a negative prompt''']
lowerCamelCase_ = negative_prompt
lowerCamelCase_ = 3 * [inputs['''prompt''']]
lowerCamelCase_ = sd_pipe(**UpperCAmelCase )
lowerCamelCase_ = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
lowerCamelCase_ = self.get_dummy_inputs(UpperCAmelCase )
lowerCamelCase_ = 3 * ['''this is a negative prompt''']
lowerCamelCase_ = 3 * [inputs.pop('''prompt''' )]
(
(
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) ,
) = sd_pipe.encode_prompt(UpperCAmelCase , negative_prompt=UpperCAmelCase )
lowerCamelCase_ = sd_pipe(
**UpperCAmelCase , prompt_embeds=UpperCAmelCase , negative_prompt_embeds=UpperCAmelCase , pooled_prompt_embeds=UpperCAmelCase , negative_pooled_prompt_embeds=UpperCAmelCase , )
lowerCamelCase_ = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
def UpperCAmelCase__ ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase="cpu" , UpperCAmelCase=torch.floataa , UpperCAmelCase=0 ):
lowerCamelCase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
lowerCamelCase_ = np.random.RandomState(UpperCAmelCase ).standard_normal((1, 4, 64, 64) )
lowerCamelCase_ = torch.from_numpy(UpperCAmelCase ).to(device=UpperCAmelCase , dtype=UpperCAmelCase )
lowerCamelCase_ = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''' )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowerCamelCase_ = self.get_inputs(UpperCAmelCase )
lowerCamelCase_ = pipe(**UpperCAmelCase ).images
lowerCamelCase_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowerCamelCase_ = np.array([0.4_9_4_9_3, 0.4_7_8_9_6, 0.4_0_7_9_8, 0.5_4_2_1_4, 0.5_3_2_1_2, 0.4_8_2_0_2, 0.4_7_6_5_6, 0.4_6_3_2_9, 0.4_8_5_0_6] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
| 29 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class _A :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=64 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=None , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = embedding_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
def UpperCAmelCase ( self ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self ):
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = MobileBertModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
_UpperCAmelCase = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = MobileBertForMaskedLM(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
_UpperCAmelCase = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = MobileBertForNextSentencePrediction(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
_UpperCAmelCase = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = MobileBertForPreTraining(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
_UpperCAmelCase = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , next_sentence_label=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = MobileBertForQuestionAnswering(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
_UpperCAmelCase = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = MobileBertForSequenceClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
_UpperCAmelCase = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = MobileBertForTokenClassification(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
_UpperCAmelCase = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = self.num_choices
_UpperCAmelCase = MobileBertForMultipleChoice(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
_UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase ( self ):
_UpperCAmelCase = self.prepare_config_and_inputs()
(
(
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) ,
) = config_and_inputs
_UpperCAmelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _A ( __lowercase , __lowercase , unittest.TestCase ):
__a = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
__a = (
{
"""feature-extraction""": MobileBertModel,
"""fill-mask""": MobileBertForMaskedLM,
"""question-answering""": MobileBertForQuestionAnswering,
"""text-classification""": MobileBertForSequenceClassification,
"""token-classification""": MobileBertForTokenClassification,
"""zero-shot""": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__a = True
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ):
_UpperCAmelCase = super()._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
if return_labels:
if model_class in get_values(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_SCREAMING_SNAKE_CASE )
return inputs_dict
def UpperCAmelCase ( self ):
_UpperCAmelCase = MobileBertModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def UpperCAmelCase ( self ):
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( snake_case ) -> Optional[int]:
return torch.tensor(
snake_case , dtype=torch.long , device=snake_case , )
a = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class _A ( unittest.TestCase ):
@slow
def UpperCAmelCase ( self ):
_UpperCAmelCase = MobileBertModel.from_pretrained("""google/mobilebert-uncased""" ).to(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] )
with torch.no_grad():
_UpperCAmelCase = model(_SCREAMING_SNAKE_CASE )[0]
_UpperCAmelCase = torch.Size((1, 9, 512) )
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = torch.tensor(
[
[
[-2.4_73_65_26e07, 8.2_69_16_56e04, 1.6_52_18_38e05],
[-5.7_54_17_04e-01, 3.9_05_60_22e00, 4.4_01_15_07e00],
[2.6_04_73_59e00, 1.5_67_76_52e00, -1.7_32_41_88e-01],
]
] , device=_SCREAMING_SNAKE_CASE , )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
_UpperCAmelCase = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
_UpperCAmelCase = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound ) | 518 | 0 |
import collections
import importlib.util
import os
import re
from pathlib import Path
_A : Optional[Any] = '''src/transformers'''
# Matches is_xxx_available()
_A : str = re.compile(r'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
_A : Optional[Any] = re.compile(r'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_A : str = re.compile(r'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
_A : int = re.compile(r'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
_A : List[Any] = re.compile(r'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_A : int = re.compile(r'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
_A : Optional[Any] = re.compile('''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
_A : Optional[int] = re.compile('''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
_A : Optional[int] = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
_A : Optional[Any] = re.compile(r'''^\s*try:''')
# Catches a line with else:
_A : Union[str, Any] = re.compile(r'''^\s*else:''')
def __lowerCAmelCase ( snake_case : int ) -> Optional[int]:
if _re_test_backend.search(snake_case ) is None:
return None
__lowerCamelCase: Tuple = [b[0] for b in _re_backend.findall(snake_case )]
backends.sort()
return "_and_".join(snake_case )
def __lowerCAmelCase ( snake_case : Tuple ) -> List[str]:
with open(snake_case , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__lowerCamelCase: Any = f.readlines()
__lowerCamelCase: Optional[int] = 0
while line_index < len(snake_case ) and not lines[line_index].startswith("""_import_structure = {""" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(snake_case ):
return None
# First grab the objects without a specific backend in _import_structure
__lowerCamelCase: Tuple = []
while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None:
__lowerCamelCase: List[str] = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(snake_case ):
__lowerCamelCase: Optional[int] = _re_one_line_import_struct.search(snake_case ).groups()[0]
__lowerCamelCase: Optional[int] = re.findall("""\[([^\]]+)\]""" , snake_case )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(""", """ )] )
line_index += 1
continue
__lowerCamelCase: Dict = _re_import_struct_key_value.search(snake_case )
if single_line_import_search is not None:
__lowerCamelCase: str = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(snake_case ) > 0]
objects.extend(snake_case )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
line_index += 1
__lowerCamelCase: Union[str, Any] = {"""none""": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("""if TYPE_CHECKING""" ):
# If the line is an if not is_backend_available, we grab all objects associated.
__lowerCamelCase: Optional[int] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
__lowerCamelCase: Union[str, Any] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
__lowerCamelCase: Union[str, Any] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ):
__lowerCamelCase: Optional[Any] = lines[line_index]
if _re_import_struct_add_one.search(snake_case ) is not None:
objects.append(_re_import_struct_add_one.search(snake_case ).groups()[0] )
elif _re_import_struct_add_many.search(snake_case ) is not None:
__lowerCamelCase: int = _re_import_struct_add_many.search(snake_case ).groups()[0].split(""", """ )
__lowerCamelCase: str = [obj[1:-1] for obj in imports if len(snake_case ) > 0]
objects.extend(snake_case )
elif _re_between_brackets.search(snake_case ) is not None:
__lowerCamelCase: str = _re_between_brackets.search(snake_case ).groups()[0].split(""", """ )
__lowerCamelCase: Optional[int] = [obj[1:-1] for obj in imports if len(snake_case ) > 0]
objects.extend(snake_case )
elif _re_quote_object.search(snake_case ) is not None:
objects.append(_re_quote_object.search(snake_case ).groups()[0] )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
elif line.startswith(""" """ * 12 + """\"""" ):
objects.append(line[13:-3] )
line_index += 1
__lowerCamelCase: List[Any] = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
__lowerCamelCase: Any = []
while (
line_index < len(snake_case )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("""else""" )
):
__lowerCamelCase: str = lines[line_index]
__lowerCamelCase: str = _re_import.search(snake_case )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 8 ):
objects.append(line[8:-2] )
line_index += 1
__lowerCamelCase: List[str] = {"""none""": objects}
# Let's continue with backend-specific objects
while line_index < len(snake_case ):
# If the line is an if is_backend_available, we grab all objects associated.
__lowerCamelCase: str = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
__lowerCamelCase: Optional[Any] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
__lowerCamelCase: Union[str, Any] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ):
__lowerCamelCase: Tuple = lines[line_index]
__lowerCamelCase: List[Any] = _re_import.search(snake_case )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 12 ):
objects.append(line[12:-2] )
line_index += 1
__lowerCamelCase: Union[str, Any] = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def __lowerCAmelCase ( snake_case : int , snake_case : Tuple ) -> Any:
def find_duplicates(snake_case : Any ):
return [k for k, v in collections.Counter(snake_case ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
__lowerCamelCase: List[Any] = []
for key in import_dict_objects.keys():
__lowerCamelCase: List[Any] = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(f'Duplicate _import_structure definitions for: {duplicate_imports}' )
__lowerCamelCase: List[str] = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(f'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
__lowerCamelCase: Optional[Any] = """base imports""" if key == """none""" else f'{key} backend'
errors.append(f'Differences for {name}:' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(f' {a} in TYPE_HINT but not in _import_structure.' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(f' {a} in _import_structure but not in TYPE_HINT.' )
return errors
def __lowerCAmelCase ( ) -> Dict:
__lowerCamelCase: Union[str, Any] = []
for root, _, files in os.walk(snake_case ):
if "__init__.py" in files:
__lowerCamelCase: Tuple = os.path.join(snake_case , """__init__.py""" )
__lowerCamelCase: str = parse_init(snake_case )
if objects is not None:
__lowerCamelCase: Union[str, Any] = analyze_results(*snake_case )
if len(snake_case ) > 0:
__lowerCamelCase: List[str] = f'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'
failures.append("""\n""".join(snake_case ) )
if len(snake_case ) > 0:
raise ValueError("""\n\n""".join(snake_case ) )
def __lowerCAmelCase ( ) -> Optional[int]:
__lowerCamelCase: int = []
for path, directories, files in os.walk(snake_case ):
for folder in directories:
# Ignore private modules
if folder.startswith("""_""" ):
directories.remove(snake_case )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(snake_case ) / folder).glob("""*.py""" ) ) ) == 0:
continue
__lowerCamelCase: int = str((Path(snake_case ) / folder).relative_to(snake_case ) )
__lowerCamelCase: Tuple = short_path.replace(os.path.sep , """.""" )
submodules.append(snake_case )
for fname in files:
if fname == "__init__.py":
continue
__lowerCamelCase: List[Any] = str((Path(snake_case ) / fname).relative_to(snake_case ) )
__lowerCamelCase: int = short_path.replace(""".py""" , """""" ).replace(os.path.sep , """.""" )
if len(submodule.split(""".""" ) ) == 1:
submodules.append(snake_case )
return submodules
_A : Dict = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
]
def __lowerCAmelCase ( ) -> List[str]:
# This is to make sure the transformers module imported is the one in the repo.
__lowerCamelCase: Dict = importlib.util.spec_from_file_location(
"""transformers""" , os.path.join(snake_case , """__init__.py""" ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
__lowerCamelCase: Union[str, Any] = spec.loader.load_module()
__lowerCamelCase: str = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(snake_case ) > 0:
__lowerCamelCase: Optional[int] = """\n""".join(f'- {module}' for module in module_not_registered )
raise ValueError(
"""The following submodules are not properly registered in the main init of Transformers:\n"""
f'{list_of_modules}\n'
"""Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 189 |
_A : Tuple = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
def __lowerCAmelCase ( ) -> None:
__lowerCamelCase: Optional[int] = input("""Enter message: """ )
__lowerCamelCase: Dict = input("""Enter key [alphanumeric]: """ )
__lowerCamelCase: List[Any] = input("""Encrypt/Decrypt [e/d]: """ )
if mode.lower().startswith("""e""" ):
__lowerCamelCase: Optional[int] = """encrypt"""
__lowerCamelCase: Optional[int] = encrypt_message(snake_case , snake_case )
elif mode.lower().startswith("""d""" ):
__lowerCamelCase: Union[str, Any] = """decrypt"""
__lowerCamelCase: Optional[Any] = decrypt_message(snake_case , snake_case )
print(f'\n{mode.title()}ed message:' )
print(snake_case )
def __lowerCAmelCase ( snake_case : str , snake_case : str ) -> str:
return translate_message(snake_case , snake_case , """encrypt""" )
def __lowerCAmelCase ( snake_case : str , snake_case : str ) -> str:
return translate_message(snake_case , snake_case , """decrypt""" )
def __lowerCAmelCase ( snake_case : str , snake_case : str , snake_case : str ) -> str:
__lowerCamelCase: Any = []
__lowerCamelCase: Optional[int] = 0
__lowerCamelCase: Any = key.upper()
for symbol in message:
__lowerCamelCase: int = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(snake_case )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(snake_case ):
__lowerCamelCase: Union[str, Any] = 0
else:
translated.append(snake_case )
return "".join(snake_case )
if __name__ == "__main__":
main()
| 189 | 1 |
import math
from collections.abc import Iterator
from itertools import takewhile
def __lowerCamelCase ( __lowerCAmelCase : int ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(__lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __lowerCamelCase ( ) -> Iterator[int]:
__UpperCamelCase : int = 2
while True:
if is_prime(__lowerCAmelCase ):
yield num
num += 1
def __lowerCamelCase ( __lowerCAmelCase : int = 2000000 ) -> int:
return sum(takewhile(lambda __lowerCAmelCase : x < n , prime_generator() ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 269 |
from collections.abc import Iterable
from typing import Generic, TypeVar
UpperCamelCase = TypeVar('_T')
class _A ( Generic[_T] ):
def __init__( self : int , lowerCamelCase__ : Iterable[_T] | None = None ):
"""simple docstring"""
__UpperCamelCase : list[_T] = list(iterable or [] )
__UpperCamelCase : list[_T] = []
def __len__( self : str ):
"""simple docstring"""
return len(self._stacka ) + len(self._stacka )
def __repr__( self : Dict ):
"""simple docstring"""
return f'Queue({tuple(self._stacka[::-1] + self._stacka )})'
def a ( self : Union[str, Any] , lowerCamelCase__ : _T ):
"""simple docstring"""
self._stacka.append(lowerCamelCase__ )
def a ( self : Union[str, Any] ):
"""simple docstring"""
__UpperCamelCase : Any = self._stacka.pop
__UpperCamelCase : int = self._stacka.append
if not self._stacka:
while self._stacka:
stacka_append(stacka_pop() )
if not self._stacka:
raise IndexError("""Queue is empty""" )
return self._stacka.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 269 | 1 |
"""simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class a :
def lowerCamelCase__ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: int =TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: Optional[Any] =AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: str =UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
"""ResnetDownsampleBlock2D""",
"""SimpleCrossAttnDownBlock2D""",
] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: List[str] =DDPMScheduler(
num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , thresholding=lowerCAmelCase , dynamic_thresholding_ratio=0.9_5 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: Optional[Any] =IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def lowerCamelCase__ ( self : str ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: Any =TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: Dict =AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: List[Any] =UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
"""ResnetDownsampleBlock2D""",
"""SimpleCrossAttnDownBlock2D""",
] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , class_embed_type="""timestep""" , mid_block_scale_factor=1.4_1_4 , time_embedding_act_fn="""gelu""" , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: str =DDPMScheduler(
num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , thresholding=lowerCAmelCase , dynamic_thresholding_ratio=0.9_5 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: List[Any] =DDPMScheduler(
num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: Tuple =IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def lowerCamelCase__ ( self : int ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =self.get_dummy_components()
SCREAMING_SNAKE_CASE_: str =self.pipeline_class(**lowerCAmelCase )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =self.get_dummy_inputs(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Dict =inputs["""prompt"""]
SCREAMING_SNAKE_CASE_: Tuple =inputs["""generator"""]
SCREAMING_SNAKE_CASE_: Tuple =inputs["""num_inference_steps"""]
SCREAMING_SNAKE_CASE_: Tuple =inputs["""output_type"""]
if "image" in inputs:
SCREAMING_SNAKE_CASE_: List[str] =inputs["""image"""]
else:
SCREAMING_SNAKE_CASE_: Union[str, Any] =None
if "mask_image" in inputs:
SCREAMING_SNAKE_CASE_: Optional[Any] =inputs["""mask_image"""]
else:
SCREAMING_SNAKE_CASE_: Tuple =None
if "original_image" in inputs:
SCREAMING_SNAKE_CASE_: List[str] =inputs["""original_image"""]
else:
SCREAMING_SNAKE_CASE_: Optional[int] =None
SCREAMING_SNAKE_CASE_: str =pipe.encode_prompt(lowerCAmelCase )
# inputs with prompt converted to embeddings
SCREAMING_SNAKE_CASE_: Union[str, Any] ={
"""prompt_embeds""": prompt_embeds,
"""negative_prompt_embeds""": negative_prompt_embeds,
"""generator""": generator,
"""num_inference_steps""": num_inference_steps,
"""output_type""": output_type,
}
if image is not None:
SCREAMING_SNAKE_CASE_: int =image
if mask_image is not None:
SCREAMING_SNAKE_CASE_: Optional[Any] =mask_image
if original_image is not None:
SCREAMING_SNAKE_CASE_: str =original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =pipe(**lowerCAmelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =self.pipeline_class.from_pretrained(lowerCAmelCase )
pipe_loaded.to(lowerCAmelCase )
pipe_loaded.set_progress_bar_config(disable=lowerCAmelCase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(lowerCAmelCase , lowerCAmelCase ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , )
SCREAMING_SNAKE_CASE_: Optional[int] =self.get_dummy_inputs(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =inputs["""generator"""]
SCREAMING_SNAKE_CASE_: List[str] =inputs["""num_inference_steps"""]
SCREAMING_SNAKE_CASE_: List[str] =inputs["""output_type"""]
# inputs with prompt converted to embeddings
SCREAMING_SNAKE_CASE_: Dict ={
"""prompt_embeds""": prompt_embeds,
"""negative_prompt_embeds""": negative_prompt_embeds,
"""generator""": generator,
"""num_inference_steps""": num_inference_steps,
"""output_type""": output_type,
}
if image is not None:
SCREAMING_SNAKE_CASE_: Union[str, Any] =image
if mask_image is not None:
SCREAMING_SNAKE_CASE_: List[str] =mask_image
if original_image is not None:
SCREAMING_SNAKE_CASE_: int =original_image
SCREAMING_SNAKE_CASE_: List[str] =pipe_loaded(**lowerCAmelCase )[0]
SCREAMING_SNAKE_CASE_: Optional[int] =np.abs(to_np(lowerCAmelCase ) - to_np(lowerCAmelCase ) ).max()
self.assertLess(lowerCAmelCase , 1E-4 )
def lowerCamelCase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =self.get_dummy_components()
SCREAMING_SNAKE_CASE_: Any =self.pipeline_class(**lowerCAmelCase )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =self.get_dummy_inputs(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] =pipe(**lowerCAmelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =self.pipeline_class.from_pretrained(lowerCAmelCase )
pipe_loaded.to(lowerCAmelCase )
pipe_loaded.set_progress_bar_config(disable=lowerCAmelCase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
SCREAMING_SNAKE_CASE_: Tuple =self.get_dummy_inputs(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int =pipe_loaded(**lowerCAmelCase )[0]
SCREAMING_SNAKE_CASE_: List[Any] =np.abs(to_np(lowerCAmelCase ) - to_np(lowerCAmelCase ) ).max()
self.assertLess(lowerCAmelCase , 1E-4 )
| 709 |
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""bert""", choices=["""bert"""])
parser.add_argument("""--model_name""", default="""bert-base-uncased""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_bert-base-uncased_0247911.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
_UpperCAmelCase = parser.parse_args()
if args.model_type == "bert":
_UpperCAmelCase = BertForMaskedLM.from_pretrained(args.model_name)
_UpperCAmelCase = """bert"""
else:
raise ValueError("""args.model_type should be \"bert\".""")
_UpperCAmelCase = model.state_dict()
_UpperCAmelCase = {}
for w in ["word_embeddings", "position_embeddings"]:
_UpperCAmelCase = state_dict[f"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[f"""{prefix}.embeddings.LayerNorm.{w}"""]
_UpperCAmelCase = 0
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
_UpperCAmelCase = state_dict["""cls.predictions.decoder.weight"""]
_UpperCAmelCase = state_dict["""cls.predictions.bias"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[f"""cls.predictions.transform.dense.{w}"""]
_UpperCAmelCase = state_dict[f"""cls.predictions.transform.LayerNorm.{w}"""]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
| 36 | 0 |
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
@property
def __UpperCAmelCase ( self ) -> Any:
torch.manual_seed(0 )
UpperCAmelCase_ : str = UNetaDModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def __UpperCAmelCase ( self ) -> Tuple:
UpperCAmelCase_ : Optional[int] = self.dummy_uncond_unet
UpperCAmelCase_ : Optional[Any] = KarrasVeScheduler()
UpperCAmelCase_ : int = KarrasVePipeline(unet=_UpperCamelCase , scheduler=_UpperCamelCase )
pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = torch.manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = pipe(num_inference_steps=2 , generator=_UpperCamelCase , output_type='numpy' ).images
UpperCAmelCase_ : Optional[Any] = torch.manual_seed(0 )
UpperCAmelCase_ : List[Any] = pipe(num_inference_steps=2 , generator=_UpperCamelCase , output_type='numpy' , return_dict=_UpperCamelCase )[0]
UpperCAmelCase_ : Tuple = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
UpperCAmelCase_ : Tuple = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ) -> Tuple:
UpperCAmelCase_ : int = 'google/ncsnpp-celebahq-256'
UpperCAmelCase_ : Optional[int] = UNetaDModel.from_pretrained(_UpperCamelCase )
UpperCAmelCase_ : Any = KarrasVeScheduler()
UpperCAmelCase_ : Dict = KarrasVePipeline(unet=_UpperCamelCase , scheduler=_UpperCamelCase )
pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
UpperCAmelCase_ : Tuple = torch.manual_seed(0 )
UpperCAmelCase_ : Tuple = pipe(num_inference_steps=2_0 , generator=_UpperCamelCase , output_type='numpy' ).images
UpperCAmelCase_ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_5_6, 2_5_6, 3)
UpperCAmelCase_ : List[Any] = np.array([0.5_78, 0.58_11, 0.59_24, 0.58_09, 0.5_87, 0.58_86, 0.58_61, 0.58_02, 0.5_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 406 |
def lowercase__ ( __snake_case : list , __snake_case : list ):
'''simple docstring'''
_validate_point(__snake_case )
_validate_point(__snake_case )
if len(__snake_case ) != len(__snake_case ):
raise ValueError('Both points must be in the same n-dimensional space' )
return float(sum(abs(a - b ) for a, b in zip(__snake_case , __snake_case ) ) )
def lowercase__ ( __snake_case : list[float] ):
'''simple docstring'''
if point:
if isinstance(__snake_case , __snake_case ):
for item in point:
if not isinstance(__snake_case , (int, float) ):
UpperCAmelCase_ : Optional[int] = (
'Expected a list of numbers as input, found '
F"{type(__snake_case ).__name__}"
)
raise TypeError(__snake_case )
else:
UpperCAmelCase_ : int = F"Expected a list of numbers as input, found {type(__snake_case ).__name__}"
raise TypeError(__snake_case )
else:
raise ValueError('Missing an input' )
def lowercase__ ( __snake_case : list , __snake_case : list ):
'''simple docstring'''
_validate_point(__snake_case )
_validate_point(__snake_case )
if len(__snake_case ) != len(__snake_case ):
raise ValueError('Both points must be in the same n-dimensional space' )
return float(sum(abs(x - y ) for x, y in zip(__snake_case , __snake_case ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 406 | 1 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
UpperCAmelCase_ = 250_004
UpperCAmelCase_ = 250_020
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Any = MBartTokenizer
UpperCAmelCase__ : Union[str, Any] = MBartTokenizerFast
UpperCAmelCase__ : List[str] = True
UpperCAmelCase__ : Tuple = True
def lowerCAmelCase__ ( self: Dict ):
super().setUp()
# We have a SentencePiece fixture for testing
__lowerCamelCase = MBartTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = MBartTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_ )
__lowerCamelCase = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(UpperCamelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
__lowerCamelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCamelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
__lowerCamelCase = tokenizer.convert_tokens_to_ids(UpperCamelCase_ )
self.assertListEqual(
UpperCamelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
__lowerCamelCase = tokenizer.convert_ids_to_tokens(UpperCamelCase_ )
self.assertListEqual(
UpperCamelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def lowerCAmelCase__ ( self: Union[str, Any] ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
__lowerCamelCase = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__lowerCamelCase = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
__lowerCamelCase = self.tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
__lowerCamelCase = tempfile.mkdtemp()
__lowerCamelCase = tokenizer_r.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = tokenizer_p.save_pretrained(UpperCamelCase_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
__lowerCamelCase = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(UpperCamelCase_ , UpperCamelCase_ )
# Checks everything loads correctly in the same way
__lowerCamelCase = tokenizer_r.from_pretrained(UpperCamelCase_ )
__lowerCamelCase = tokenizer_p.from_pretrained(UpperCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(UpperCamelCase_ )
# Save tokenizer rust, legacy_format=True
__lowerCamelCase = tempfile.mkdtemp()
__lowerCamelCase = tokenizer_r.save_pretrained(UpperCamelCase_ , legacy_format=UpperCamelCase_ )
__lowerCamelCase = tokenizer_p.save_pretrained(UpperCamelCase_ )
# Checks it save with the same files
self.assertSequenceEqual(UpperCamelCase_ , UpperCamelCase_ )
# Checks everything loads correctly in the same way
__lowerCamelCase = tokenizer_r.from_pretrained(UpperCamelCase_ )
__lowerCamelCase = tokenizer_p.from_pretrained(UpperCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_ ) )
shutil.rmtree(UpperCamelCase_ )
# Save tokenizer rust, legacy_format=False
__lowerCamelCase = tempfile.mkdtemp()
__lowerCamelCase = tokenizer_r.save_pretrained(UpperCamelCase_ , legacy_format=UpperCamelCase_ )
__lowerCamelCase = tokenizer_p.save_pretrained(UpperCamelCase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__lowerCamelCase = tokenizer_r.from_pretrained(UpperCamelCase_ )
__lowerCamelCase = tokenizer_p.from_pretrained(UpperCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_ ) )
shutil.rmtree(UpperCamelCase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__( unittest.TestCase):
UpperCAmelCase__ : Union[str, Any] = 'facebook/mbart-large-en-ro'
UpperCAmelCase__ : Optional[Any] = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
UpperCAmelCase__ : Tuple = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
UpperCAmelCase__ : int = [8274, 12_7873, 2_5916, 7, 8622, 2071, 438, 6_7485, 53, 18_7895, 23, 5_1712, 2, EN_CODE]
@classmethod
def lowerCAmelCase__ ( cls: List[Any] ):
__lowerCamelCase = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
__lowerCamelCase = 1
return cls
def lowerCAmelCase__ ( self: str ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 25_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 25_00_04 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 25_00_20 )
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Dict ):
self.assertIn(UpperCamelCase_ , self.tokenizer.all_special_ids )
__lowerCamelCase = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
__lowerCamelCase = self.tokenizer.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
__lowerCamelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertNotIn(self.tokenizer.eos_token , UpperCamelCase_ )
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , UpperCamelCase_ )
__lowerCamelCase = 10
__lowerCamelCase = self.tokenizer(UpperCamelCase_ , max_length=UpperCamelCase_ , truncation=UpperCamelCase_ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , UpperCamelCase_ )
self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[int] ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [25_00_26, 25_00_01] )
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = tempfile.mkdtemp()
__lowerCamelCase = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = MBartTokenizer.from_pretrained(UpperCamelCase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCamelCase_ )
@require_torch
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCamelCase_ , return_tensors="""pt""" )
__lowerCamelCase = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
__lowerCamelCase = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
__lowerCamelCase = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , UpperCamelCase_ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = self.tokenizer(self.src_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=3 , return_tensors="""pt""" )
__lowerCamelCase = self.tokenizer(
text_target=self.tgt_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=10 , return_tensors="""pt""" )
__lowerCamelCase = targets["""input_ids"""]
__lowerCamelCase = shift_tokens_right(UpperCamelCase_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(UpperCamelCase_ ) , {
# A, test, EOS, en_XX
"""input_ids""": [[62, 30_34, 2, 25_00_04]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 25_00_01,
} , )
| 80 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = []
__lowerCamelCase = []
for i in range(self.num_layers ):
__lowerCamelCase = self.in_channels if i == 0 else self.out_channels
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=UpperCamelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCamelCase_ )
__lowerCamelCase = resnets
__lowerCamelCase = attentions
if self.add_downsample:
__lowerCamelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self: List[str] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int=True ):
__lowerCamelCase = ()
for resnet, attn in zip(self.resnets , self.attentions ):
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
__lowerCamelCase = attn(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
__lowerCamelCase = self.downsamplers_a(UpperCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = []
for i in range(self.num_layers ):
__lowerCamelCase = self.in_channels if i == 0 else self.out_channels
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=UpperCamelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = resnets
if self.add_downsample:
__lowerCamelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self: str , UpperCamelCase_: Any , UpperCamelCase_: Optional[int] , UpperCamelCase_: int=True ):
__lowerCamelCase = ()
for resnet in self.resnets:
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
__lowerCamelCase = self.downsamplers_a(UpperCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = []
__lowerCamelCase = []
for i in range(self.num_layers ):
__lowerCamelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels
__lowerCamelCase = self.prev_output_channel if i == 0 else self.out_channels
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCamelCase_ )
__lowerCamelCase = resnets
__lowerCamelCase = attentions
if self.add_upsample:
__lowerCamelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self: Tuple , UpperCamelCase_: str , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: List[Any]=True ):
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
__lowerCamelCase = res_hidden_states_tuple[-1]
__lowerCamelCase = res_hidden_states_tuple[:-1]
__lowerCamelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
__lowerCamelCase = attn(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
if self.add_upsample:
__lowerCamelCase = self.upsamplers_a(UpperCamelCase_ )
return hidden_states
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = []
for i in range(self.num_layers ):
__lowerCamelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels
__lowerCamelCase = self.prev_output_channel if i == 0 else self.out_channels
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = resnets
if self.add_upsample:
__lowerCamelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self: List[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Dict , UpperCamelCase_: Optional[Any]=True ):
for resnet in self.resnets:
# pop res hidden states
__lowerCamelCase = res_hidden_states_tuple[-1]
__lowerCamelCase = res_hidden_states_tuple[:-1]
__lowerCamelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
if self.add_upsample:
__lowerCamelCase = self.upsamplers_a(UpperCamelCase_ )
return hidden_states
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: int ):
# there is always at least one resnet
__lowerCamelCase = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
__lowerCamelCase = []
for _ in range(self.num_layers ):
__lowerCamelCase = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCamelCase_ )
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = resnets
__lowerCamelCase = attentions
def __call__( self: int , UpperCamelCase_: Any , UpperCamelCase_: int , UpperCamelCase_: Dict , UpperCamelCase_: Optional[int]=True ):
__lowerCamelCase = self.resnets[0](UpperCamelCase_ , UpperCamelCase_ )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
__lowerCamelCase = attn(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
return hidden_states
| 80 | 1 |
from __future__ import annotations
def UpperCamelCase ( __magic_name__ : list[int] ) -> list[int]: # This function is recursive
"""simple docstring"""
lowercase__ = len(__magic_name__ )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
lowercase__ = array[0]
lowercase__ = False
lowercase__ = 1
lowercase__ = []
while not is_found and i < array_length:
if array[i] < pivot:
lowercase__ = True
lowercase__ = [element for element in array[i:] if element >= array[i]]
lowercase__ = longest_subsequence(__magic_name__ )
if len(__magic_name__ ) > len(__magic_name__ ):
lowercase__ = temp_array
else:
i += 1
lowercase__ = [element for element in array[1:] if element >= pivot]
lowercase__ = [pivot, *longest_subsequence(__magic_name__ )]
if len(__magic_name__ ) > len(__magic_name__ ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| 15 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __SCREAMING_SNAKE_CASE ( lowercase):
__SCREAMING_SNAKE_CASE : str = (UniPCMultistepScheduler,)
__SCREAMING_SNAKE_CASE : Dict = (("""num_inference_steps""", 25),)
def UpperCAmelCase__ ( self : str , **__UpperCamelCase : Any ):
_UpperCAmelCase = {
"num_train_timesteps": 1_000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
"solver_type": "bh2",
}
config.update(**__UpperCamelCase )
return config
def UpperCAmelCase__ ( self : int , __UpperCamelCase : Any=0 , **__UpperCamelCase : Any ):
_UpperCAmelCase = dict(self.forward_default_kwargs )
_UpperCAmelCase = kwargs.pop("num_inference_steps" , __UpperCamelCase )
_UpperCAmelCase = self.dummy_sample
_UpperCAmelCase = 0.1 * sample
_UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residuals
_UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__UpperCamelCase )
_UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase )
new_scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residuals
_UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
_UpperCAmelCase , _UpperCAmelCase = sample, sample
for t in range(__UpperCamelCase , time_step + scheduler.config.solver_order + 1 ):
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
_UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Union[str, Any]=0 , **__UpperCamelCase : List[Any] ):
_UpperCAmelCase = dict(self.forward_default_kwargs )
_UpperCAmelCase = kwargs.pop("num_inference_steps" , __UpperCamelCase )
_UpperCAmelCase = self.dummy_sample
_UpperCAmelCase = 0.1 * sample
_UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residuals (must be after setting timesteps)
_UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__UpperCamelCase )
_UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residual (must be after setting timesteps)
_UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
_UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : Dict=None , **__UpperCamelCase : Optional[Any] ):
if scheduler is None:
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = 10
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(__UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample
return sample
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase = dict(self.forward_default_kwargs )
_UpperCAmelCase = kwargs.pop("num_inference_steps" , __UpperCamelCase )
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = self.dummy_sample
_UpperCAmelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(__UpperCamelCase , "set_timesteps" ):
scheduler.set_timesteps(__UpperCamelCase )
elif num_inference_steps is not None and not hasattr(__UpperCamelCase , "set_timesteps" ):
_UpperCAmelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
_UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
_UpperCAmelCase = scheduler.timesteps[5]
_UpperCAmelCase = scheduler.timesteps[6]
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCAmelCase__ ( self : Union[str, Any] ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
_UpperCAmelCase = UniPCMultistepScheduler(**self.get_scheduler_config() )
_UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase )
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.2464 ) < 1e-3
_UpperCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_UpperCAmelCase = DEISMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase )
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.2464 ) < 1e-3
def UpperCAmelCase__ ( self : str ):
for timesteps in [25, 50, 100, 999, 1_000]:
self.check_over_configs(num_train_timesteps=__UpperCamelCase )
def UpperCAmelCase__ ( self : int ):
self.check_over_configs(thresholding=__UpperCamelCase )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=__UpperCamelCase , prediction_type=__UpperCamelCase , sample_max_value=__UpperCamelCase , solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , )
def UpperCAmelCase__ ( self : int ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__UpperCamelCase )
def UpperCAmelCase__ ( self : int ):
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , )
_UpperCAmelCase = self.full_loop(
solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , )
assert not torch.isnan(__UpperCamelCase ).any(), "Samples have nan numbers"
def UpperCAmelCase__ ( self : Optional[int] ):
self.check_over_configs(lower_order_final=__UpperCamelCase )
self.check_over_configs(lower_order_final=__UpperCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
self.check_over_forward(num_inference_steps=__UpperCamelCase , time_step=0 )
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase = self.full_loop()
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.2464 ) < 1e-3
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = self.full_loop(prediction_type="v_prediction" )
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.1014 ) < 1e-3
def UpperCAmelCase__ ( self : Tuple ):
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(thresholding=__UpperCamelCase , dynamic_thresholding_ratio=0 )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = 10
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter.half()
scheduler.set_timesteps(__UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample
assert sample.dtype == torch.floataa
def UpperCAmelCase__ ( self : str , **__UpperCamelCase : Optional[Any] ):
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
| 684 | 0 |
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowercase ( __a , unittest.TestCase ):
_UpperCAmelCase = None
_UpperCAmelCase = BloomTokenizerFast
_UpperCAmelCase = BloomTokenizerFast
_UpperCAmelCase = True
_UpperCAmelCase = False
_UpperCAmelCase = '''tokenizer_file'''
_UpperCAmelCase = {'''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''unk_token''': '''<unk>''', '''pad_token''': '''<pad>'''}
def UpperCamelCase ( self ) -> Dict:
super().setUp()
snake_case = BloomTokenizerFast.from_pretrained('''bigscience/tokenizer''' )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self , **A__ ) -> Optional[Any]:
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **A__ )
def UpperCamelCase ( self ) -> List[Any]:
snake_case = self.get_rust_tokenizer()
snake_case = ['''The quick brown fox</s>''', '''jumps over the lazy dog</s>''']
snake_case = [[21_75, 2_37_14, 7_31_73, 14_42_52, 2], [77, 13_26_19, 34_78, 3_68, 10_95_86, 3_54_33, 2]]
snake_case = tokenizer.batch_encode_plus(A__ )['''input_ids''']
self.assertListEqual(A__ , A__ )
snake_case = tokenizer.batch_decode(A__ )
self.assertListEqual(A__ , A__ )
def UpperCamelCase ( self , A__=6 ) -> Any:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
snake_case = self.rust_tokenizer_class.from_pretrained(A__ , **A__ )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
snake_case = '''This is a simple input'''
snake_case = ['''This is a simple input 1''', '''This is a simple input 2''']
snake_case = ('''This is a simple input''', '''This is a pair''')
snake_case = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
try:
tokenizer_r.encode(A__ , max_length=A__ )
tokenizer_r.encode_plus(A__ , max_length=A__ )
tokenizer_r.batch_encode_plus(A__ , max_length=A__ )
tokenizer_r.encode(A__ , max_length=A__ )
tokenizer_r.batch_encode_plus(A__ , max_length=A__ )
except ValueError:
self.fail('''Bloom Tokenizer should be able to deal with padding''' )
snake_case = None # Hotfixing padding = None
self.assertRaises(A__ , tokenizer_r.encode , A__ , max_length=A__ , padding='''max_length''' )
# Simple input
self.assertRaises(A__ , tokenizer_r.encode_plus , A__ , max_length=A__ , padding='''max_length''' )
# Simple input
self.assertRaises(
A__ , tokenizer_r.batch_encode_plus , A__ , max_length=A__ , padding='''max_length''' , )
# Pair input
self.assertRaises(A__ , tokenizer_r.encode , A__ , max_length=A__ , padding='''max_length''' )
# Pair input
self.assertRaises(A__ , tokenizer_r.encode_plus , A__ , max_length=A__ , padding='''max_length''' )
# Pair input
self.assertRaises(
A__ , tokenizer_r.batch_encode_plus , A__ , max_length=A__ , padding='''max_length''' , )
def UpperCamelCase ( self ) -> Any:
snake_case = self.get_rust_tokenizer()
snake_case = load_dataset('''xnli''' , '''all_languages''' , split='''test''' , streaming=A__ )
snake_case = next(iter(A__ ) )['''premise'''] # pick up one data
snake_case = list(sample_data.values() )
snake_case = list(map(tokenizer.encode , A__ ) )
snake_case = [tokenizer.decode(A__ , clean_up_tokenization_spaces=A__ ) for x in output_tokens]
self.assertListEqual(A__ , A__ )
def UpperCamelCase ( self ) -> Optional[Any]:
# The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have
# any sequence length constraints. This test of the parent class will fail since it relies on the
# maximum sequence length of the positoonal embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 44 |
'''simple docstring'''
import argparse
import copy
def __UpperCamelCase ( a : Union[str, Any] ) ->Tuple:
snake_case = {}
with open(a ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
snake_case = []
_list.append([line.split()[1], line.split()[2]] )
snake_case = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
snake_case = []
_list.append([line.split()[0], line.split()[2]] )
snake_case = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def __UpperCamelCase ( a : Dict , a : Tuple ) ->int:
with open(a ) as f:
snake_case = f.read(1 )
snake_case = start_node
snake_case = []
snake_case = start_node
snake_case = 0
while visiting not in first_solution:
snake_case = 1_0000
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(a ) and k[0] not in first_solution:
snake_case = k[1]
snake_case = k[0]
first_solution.append(a )
snake_case = distance_of_first_solution + int(a )
snake_case = best_node
first_solution.append(a )
snake_case = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
snake_case = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 1_0000
)
return first_solution, distance_of_first_solution
def __UpperCamelCase ( a : Optional[int] , a : str ) ->str:
snake_case = []
for n in solution[1:-1]:
snake_case = solution.index(a )
for kn in solution[1:-1]:
snake_case = solution.index(a )
if n == kn:
continue
snake_case = copy.deepcopy(a )
snake_case = kn
snake_case = n
snake_case = 0
for k in _tmp[:-1]:
snake_case = _tmp[_tmp.index(a ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
snake_case = distance + int(i[1] )
_tmp.append(a )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
snake_case = len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda a : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def __UpperCamelCase ( a : Any , a : Optional[Any] , a : int , a : Optional[int] , a : Union[str, Any] ) ->List[Any]:
snake_case = 1
snake_case = first_solution
snake_case = []
snake_case = distance_of_first_solution
snake_case = solution
while count <= iters:
snake_case = find_neighborhood(a , a )
snake_case = 0
snake_case = neighborhood[index_of_best_solution]
snake_case = len(a ) - 1
snake_case = False
while not found:
snake_case = 0
while i < len(a ):
if best_solution[i] != solution[i]:
snake_case = best_solution[i]
snake_case = solution[i]
break
snake_case = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
snake_case = True
snake_case = best_solution[:-1]
snake_case = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
snake_case = cost
snake_case = solution
else:
snake_case = index_of_best_solution + 1
snake_case = neighborhood[index_of_best_solution]
if len(a ) >= size:
tabu_list.pop(0 )
snake_case = count + 1
return best_solution_ever, best_cost
def __UpperCamelCase ( a : Union[str, Any]=None ) ->Optional[Any]:
snake_case = generate_neighbours(args.File )
snake_case , snake_case = generate_first_solution(
args.File , a )
snake_case , snake_case = tabu_search(
a , a , a , args.Iterations , args.Size , )
print(f"""Best solution: {best_sol}, with total distance: {best_cost}.""" )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser(description='Tabu Search')
parser.add_argument(
'-f',
'--File',
type=str,
help='Path to the file containing the data',
required=True,
)
parser.add_argument(
'-i',
'--Iterations',
type=int,
help='How many iterations the algorithm should perform',
required=True,
)
parser.add_argument(
'-s', '--Size', type=int, help='Size of the tabu list', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 44 | 1 |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.