from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
    "tokenization_ctrl": ["CTRLTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ctrl"] = [
        "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CTRLForSequenceClassification",
        "CTRLLMHeadModel",
        "CTRLModel",
        "CTRLPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_ctrl"] = [
        "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCTRLForSequenceClassification",
        "TFCTRLLMHeadModel",
        "TFCTRLModel",
        "TFCTRLPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
    from .tokenization_ctrl import CTRLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ctrl import (
            CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            CTRLForSequenceClassification,
            CTRLLMHeadModel,
            CTRLModel,
            CTRLPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_ctrl import (
            TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCTRLForSequenceClassification,
            TFCTRLLMHeadModel,
            TFCTRLModel,
            TFCTRLPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
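# For context, a minimal sketch of the lazy-import pattern this init relies on
# (an illustrative stand-in, not transformers' actual ``_LazyModule``):
# attribute access triggers the real submodule import and caches the result.
import importlib
from types import ModuleType


class _LazyModuleSketch(ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported name to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}
        self.__all__ = list(self._attr_to_module)

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value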
from __future__ import annotations
from math import gcd
def pollard_rho(
    num: int,
    seed: int = 2,
    step: int = 1,
    attempts: int = 3,
) -> int | None:
    """Return a nontrivial factor of ``num`` using Pollard's rho algorithm."""
    if num < 2:
        raise ValueError("The input value cannot be less than 2")

    # Because of the relationship between ``f(f(x))`` and ``f(x)``, this
    # algorithm struggles to find factors that are divisible by two.
    # As a workaround, we specifically check for two and even inputs.
    # See: https://math.stackexchange.com/a/2856214/165820
    if num > 2 and num % 2 == 0:
        return 2

    # Pollard's rho algorithm requires a function that returns pseudorandom
    # values between 0 <= X < ``num``. It doesn't need to be random in the
    # sense that the output value is cryptographically secure or difficult
    # to calculate; it only needs to be random in the sense that all output
    # values should be equally likely to appear.
    # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``.
    # However, the success of Pollard's algorithm isn't guaranteed and is
    # determined in part by the initial seed and the chosen random function.
    # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``,
    # where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)

            # At some point both the tortoise and the hare will enter a cycle whose
            # length ``p`` is a divisor of ``num``. Once in that cycle, at some point
            # the tortoise and hare will end up on the same value modulo ``p``.
            # We can detect when this happens because the position difference between
            # the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)

            if divisor == 1:
                # No common divisor yet, just keep searching.
                continue
            else:
                # We found a common divisor!
                if divisor == num:
                    # Unfortunately, the divisor is ``num`` itself and is useless.
                    break
                else:
                    # The divisor is a nontrivial factor of ``num``!
                    return divisor

        # If we made it here, then this attempt failed.
        # We need to pick a new starting seed for the tortoise and hare
        # in addition to a new step value for the random function.
        # To keep this example implementation deterministic, the
        # new values will be generated based on currently available
        # values instead of using something like ``random.randint``.

        # We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant does.
        seed = hare

        # The new step value for the random function can just be incremented.
        # At first the results will be similar to what the old function would
        # have produced, but the value will quickly diverge after a bit.
        step += 1

    # We haven't found a divisor within the requested number of attempts.
    # We were unlucky, or ``num`` itself is actually prime.
    return None
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "num",
        type=int,
        help="The value to find a divisor of",
    )
    parser.add_argument(
        "--attempts",
        type=int,
        default=3,
        help="The number of attempts before giving up",
    )
    args = parser.parse_args()

    divisor = pollard_rho(args.num, attempts=args.attempts)
    if divisor is None:
        print(f"{args.num} is probably prime")
    else:
        quotient = args.num // divisor
        print(f"{args.num} = {divisor} * {quotient}")
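# A quick worked check of ``pollard_rho`` (factorization verified by hand):
# 187 = 11 * 17, and with the default seed and step the tortoise/hare walk
# reaches gcd(180 - 26, 187) = 11 on its second gcd test:
#
#     >>> pollard_rho(187)
#     11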
from .imports import is_rich_available

if is_rich_available():
    from rich.traceback import install

    install(show_locals=False)
else:
    raise ModuleNotFoundError("To use the rich extension, install rich with `pip install rich`")
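# For reference, a minimal sketch of what an ``is_rich_available``-style probe
# usually looks like (an assumption for illustration, not accelerate's actual
# implementation): check for the distribution without importing it.
#
#     import importlib.util
#
#     def is_rich_available() -> bool:
#         return importlib.util.find_spec("rich") is not None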
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels,
        encoder_hidden_states, encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels,
        encoder_hidden_states, encoder_attention_mask,
    ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels,
        encoder_hidden_states, encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and attention mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13bf_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        torch.testing.assert_close(out.mean(-1), EXPECTED_SLICE, atol=1e-2, rtol=1e-2)

    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test"
    )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))

        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32
        )
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Model is currently gated")
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = (
            "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the "
            "universe and 2) the passage of time and the length of objects can vary depending on the observer's frame "
            "of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known "
            'as the "princi'
        )
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf", device_map="sequential", use_safetensors=False
        )

        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
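# The integration tests above are opt-in; a typical (assumed) invocation from a
# transformers checkout, with access to the gated Llama-2 weights, would be:
#
#     RUN_SLOW=1 python -m pytest tests/models/llama/test_modeling_llama.py -k Integration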
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
    from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 50003
PYTHON_CODE = 50002


@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_base_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        # fmt: on
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
        # fmt: on

        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 4, end)]
        self.assertListEqual(language_tokens, ["__java__", "__python__", "__en_XX__", "<mask>"])

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
            code,
        )

    def test_full_multi_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="multi", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        # fmt: on
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
        # fmt: on

        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 7, end)]
        self.assertListEqual(
            language_tokens, ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"]
        )

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
            code,
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartPythonEnIntegrationTest(unittest.TestCase):
    checkpoint_name = "uclanlp/plbart-python-en_XX"
    src_text = [
        "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
        "def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
    ]
    tgt_text = [
        "Returns the maximum value of a b c.",
        "Sums the values of a b c.",
    ]
    # fmt: off
    expected_src_tokens = [134, 5452, 33460, 33441, 33463, 33465, 33463, 33449, 988, 20, 33456, 19, 33456, 771, 39, 4258, 889, 3318, 33441, 33463, 33465, 33463, 33449, 2471, 2, PYTHON_CODE]
    # fmt: on

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name, language_codes="base", src_lang="python", tgt_lang="en_XX"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"], 50001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"], 50002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"], 50003)

    def test_python_en_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_python_en_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(PYTHON_CODE, self.tokenizer.all_special_ids)
        generated_ids = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_english = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_english)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_python_en_tokenizer_truncation(self):
        src_text = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20]
        self.assertIsInstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], PYTHON_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"]), [50004, 50001])

    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = PLBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist(), [2, PYTHON_CODE])
        self.assertEqual(batch.decoder_input_ids[1][0], EN_CODE)
        self.assertEqual(batch.decoder_input_ids[1][-1], 2)
        self.assertEqual(batch.labels[1][-2:].tolist(), [2, EN_CODE])

    @require_torch
    def test_python_en_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 26), batch.input_ids.shape)
        self.assertEqual((2, 26), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, PYTHON_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="java"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[150, 242, 2, 50003]],
                "attention_mask": [[1, 1, 1, 1]],
                # java
                "forced_bos_token_id": 50001,
            },
        )
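# For reference, a minimal sketch of what mBART/PLBart-style ``shift_tokens_right``
# does (an assumed re-implementation for illustration, not the library function):
# the last non-pad token of each label row (the language code) is wrapped around
# to position 0 so the decoder starts from it.
def _shift_tokens_right_sketch(input_ids, pad_token_id):
    prev_output_tokens = input_ids.clone()
    # index of the last non-pad token in each row
    index_of_eos = (prev_output_tokens.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
    decoder_start_tokens = prev_output_tokens.gather(1, index_of_eos).squeeze()
    # shift everything one position to the right, then prepend the start token
    prev_output_tokens[:, 1:] = prev_output_tokens[:, :-1].clone()
    prev_output_tokens[:, 0] = decoder_start_tokens
    return prev_output_tokens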
from torch import nn


class ClassificationHead(nn.Module):
    """Single-layer classification head."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = nn.Linear(embed_size, class_size)
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        # hidden_state = nn.functional.relu(self.mlp1(hidden_state))
        # hidden_state = self.mlp2(hidden_state)
        logits = self.mlp(hidden_state)
        return logits
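# Quick usage sketch (sizes are illustrative assumptions): score a pooled
# 768-dim hidden state against 5 classes.
#
#     import torch
#
#     head = ClassificationHead(class_size=5, embed_size=768)
#     hidden_state = torch.randn(2, 768)  # e.g. a mean-pooled transformer state
#     logits = head(hidden_state)         # -> shape (2, 5)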
from decimal import Decimal, getcontext
from math import ceil, factorial


def pi(precision: int) -> str:
    """Return pi to ``precision`` decimal digits via the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)  # each term adds roughly 14 digits
    constant_term = 426_880 * Decimal(10_005).sqrt()
    exponential_term = 1
    linear_term = 13_591_409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545_140_134
        exponential_term *= -262_537_412_640_768_000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.g4dn.xlarge",
            "results": {"train_runtime": 650, "eval_accuracy": 0.6, "eval_loss": 0.9},
        },
        {
            "framework": "tensorflow",
            "script": "run_tf.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.g4dn.xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.3, "eval_loss": 0.9},
        },
    ]
)
class SingleNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        estimator = self.create_estimator()

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
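# These tests are gated behind an environment flag; a typical (assumed)
# invocation from a transformers checkout would be:
#
#     TEST_SAGEMAKER=True python -m pytest -s tests/sagemaker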
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
    from PIL import Image

    from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
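# Minimal end-to-end usage sketch for the processor under test (the checkpoint
# id is the publicly released CLIPSeg model; treat it as an assumption):
#
#     from PIL import Image
#     import numpy as np
#     from transformers import CLIPSegProcessor
#
#     processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#     image = Image.fromarray(np.zeros((64, 64, 3), dtype=np.uint8))
#     inputs = processor(text=["a cat"], images=[image], return_tensors="pt")
#     sorted(inputs.keys())  # ['attention_mask', 'input_ids', 'pixel_values']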
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]


if is_transformers_available() and is_torch_available():
    from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D


class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]

        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
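# Tiny illustration (shapes assumed) of the skip-connection bookkeeping shared
# by the two up blocks above: the most recent residual is popped off the tuple
# and concatenated on the channel axis (these Flax blocks are channels-last):
#
#     h = jnp.zeros((1, 8, 8, 64))
#     skips = (jnp.zeros((1, 8, 8, 32)), jnp.zeros((1, 8, 8, 64)))
#     res, skips = skips[-1], skips[:-1]
#     h = jnp.concatenate((h, res), axis=-1)  # -> shape (1, 8, 8, 128)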
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        raw_story = """It was the year of Our Lord one thousand seven hundred and
        seventy-five.\n\nSpiritual revelations were conceded to England at that
        favoured period, as at this."""
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
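# For readers without the example package on hand, a minimal sketch of what the
# tests above expect ``truncate_or_pad`` to do (an assumed re-implementation
# for illustration; the real helper lives in utils_summarization):
def _truncate_or_pad_sketch(sequence, block_size, pad_token_id):
    if len(sequence) > block_size:
        return sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))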
import pytest
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"

DATASET_LOADING_SCRIPT_CODE = """
import json
import os
import datasets
REPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"
URLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
\"tokens\": datasets.Sequence(datasets.Value(\"string\")),
\"ner_tags\": datasets.Sequence(
datasets.features.ClassLabel(
names=[
\"O\",
\"B-PER\",
\"I-PER\",
\"B-ORG\",
\"I-ORG\",
\"B-LOC\",
\"I-LOC\",
]
)
),
\"langs\": datasets.Sequence(datasets.Value(\"string\")),
\"spans\": datasets.Sequence(datasets.Value(\"string\")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),
]
def _generate_examples(self, filepath):
with open(filepath, \"r\", encoding=\"utf-8\") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
"""
@pytest.fixture
def dataset_loading_script_name() -> str:
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code() -> str:
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path) -> str:
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
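# A hypothetical consumer of the fixtures above (the test body is an assumed
# illustration, not part of this conftest): the directory returned by
# ``dataset_loading_script_dir`` can be passed straight to ``load_dataset``.
#
#     def test_dummy_dataset_loads(dataset_loading_script_dir):
#         from datasets import load_dataset
#
#         ds = load_dataset(dataset_loading_script_dir, split="train")
#         assert "tokens" in ds.column_names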
| 2 |
"""simple docstring"""
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"""pipelines_utils""",
"""0.22.0""",
"""Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
standard_warn=False,
stacklevel=3,
)
| 512 | 0 |
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power (Project Euler 16)."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
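# Added sanity check for solution() above: 2**15 == 32768 and 3 + 2 + 7 + 6 + 8 == 26.
def _demo_power_digit_sum() -> None:
    assert solution(15) == 26
    # Equivalent one-liner: sum(int(digit) for digit in str(2**15)) == 26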
| 719 |
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Return True if number is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq
def add_three(x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int) -> tuple[int, int]:
    """Add the three fractions x_num/x_den + y_num/y_den + z_num/z_den in lowest terms."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
def solution(order: int = 35) -> int:
    """Sum the unique reduced fractions generated by the four exponent cases
    (n = 1, 2, -1, -2) and return numerator + denominator of the total."""
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]
    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)
    for num, den in unique_s:
        total += Fraction(num, den)
    return total.denominator + total.numerator
if __name__ == "__main__":
print(F"""{solution() = }""")
| 314 | 0 |
def is_palindrome(n) -> bool:
    """Return True if n reads the same forwards and backwards."""
    return str(n) == str(n)[::-1]
def sum_reverse(n) -> int:
    """Return n plus the number formed by reversing its digits."""
    return int(n) + int(str(n)[::-1])
def solution(limit: int = 10_000) -> int:
    """Count Lychrel candidates below limit (Project Euler 55): numbers that do
    not reach a palindrome within 50 reverse-and-add iterations."""
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)
if __name__ == "__main__":
    print(f"{solution() = }")
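# Added example for the reverse-and-add process above: 47 + 74 = 121, a palindrome,
# so 47 is not a Lychrel candidate.
def _demo_reverse_and_add() -> None:
    assert sum_reverse(47) == 121
    assert is_palindrome(121)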
| 80 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
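# NOTE (added): minimal sketch of the lazy-import idea behind _LazyModule — the real
# import is deferred until first attribute access. Names below are illustrative only.
import importlib
class _LazyShim:
    def __init__(self, module_name: str):
        self._module_name = module_name
        self._module = None
    def __getattr__(self, attr):
        if self._module is None:
            self._module = importlib.import_module(self._module_name)  # deferred import
        return getattr(self._module, attr)
# _LazyShim("json").dumps({"a": 1}) only imports json on the first attribute access.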
| 115 | 0 |
"""simple docstring"""
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    """A graph vertex with neighbors and weighted edges."""
    def __init__(self, id_) -> None:
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}
    def __lt__(self, other) -> bool:
        return self.key < other.key
    def __repr__(self) -> str:
        return self.id
    def add_neighbor(self, vertex) -> None:
        self.neighbors.append(vertex)
    def add_edge(self, vertex, weight) -> None:
        self.edges[vertex.id] = weight
def connect(graph, a, b, edge) -> None:
    """Connect the 1-indexed vertices a and b with an edge of the given weight."""
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)
def prim(graph: list, root: Vertex) -> list:
    """Prim's algorithm with linear-scan extraction; O(n^2) overall."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a
def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Prim's algorithm with a binary heap; O((m + n) log n)."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph)
    hq.heapify(h)
    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)
    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)
def test_vector() -> None:
    """Placeholder for the module's doctest examples."""
if __name__ == "__main__":
import doctest
doctest.testmod()
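# Added usage example (assumes the fixed classes above): a triangle graph whose MST
# from graph[0] consists of the 1-based (child, parent) pairs (2, 1) and (3, 2).
def _demo_prim() -> None:
    graph = [Vertex(i) for i in range(3)]  # ids "0", "1", "2"
    connect(graph, 1, 2, 1)
    connect(graph, 2, 3, 2)
    connect(graph, 1, 3, 4)
    assert prim(graph, graph[0]) == [(2, 1), (3, 2)]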
| 183 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random nested float list with the given 2-D shape."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
    def prepare_feat_extract_dict(self):
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor
    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)
    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, 'spectrogram_length'))
        self.assertTrue(hasattr(feature_extractor, 'feature_size'))
        self.assertTrue(hasattr(feature_extractor, 'num_audio_channels'))
        self.assertTrue(hasattr(feature_extractor, 'hop_length'))
        self.assertTrue(hasattr(feature_extractor, 'chunk_length'))
        self.assertTrue(hasattr(feature_extractor, 'sampling_rate'))
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop('mel_filters')
        mel_second = dict_second.pop('mel_filters')
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, 'feat_extract.json')
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop('mel_filters')
        mel_second = dict_second.pop('mel_filters')
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors='np', sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors='np', sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors='np', sampling_rate=44100, mask_audio=True).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors='np', sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
    def _load_datasamples(self, num_samples):
        ds = load_dataset('hf-internal-testing/librispeech_asr_dummy', 'clean', split='validation')
        # automatic decoding with librispeech
        speech_samples = ds.sort('id').select(range(num_samples))[:num_samples]['audio']
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors='pt').audio_values
        self.assertEqual(audio_values.shape, (1, 1, 192, 128))
        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
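# NOTE (added): shape intuition for the assertions above — the audio features are
# (batch, num_audio_channels, time_frames, feature_size); a plain-numpy sketch:
def _demo_audio_feature_shape() -> None:
    batch = np.zeros((1, 1, 192, 128))  # mirrors the (1, 1, 192, 128) integration check
    assert batch.ndim == 4 and batch.shape[-1] == 128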
| 183 | 1 |
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue:
    """A simple FIFO queue used for the tree's level-order printout."""
    def __init__(self) -> None:
        self.data: list[Any] = []
        self.head: int = 0
        self.tail: int = 0
    def is_empty(self) -> bool:
        return self.head == self.tail
    def push(self, data: Any) -> None:
        self.data.append(data)
        self.tail = self.tail + 1
    def pop(self) -> Any:
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret
    def count(self) -> int:
        return self.tail - self.head
    def print_queue(self) -> None:
        print(self.data)
        print("**************")
        print(self.data[self.head : self.tail])
class MyNode:
    """A node of the AVL tree."""
    def __init__(self, data: Any) -> None:
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1
    def get_data(self) -> Any:
        return self.data
    def get_left(self) -> MyNode | None:
        return self.left
    def get_right(self) -> MyNode | None:
        return self.right
    def get_height(self) -> int:
        return self.height
    def set_data(self, data: Any) -> None:
        self.data = data
    def set_left(self, node: MyNode | None) -> None:
        self.left = node
    def set_right(self, node: MyNode | None) -> None:
        self.right = node
    def set_height(self, height: int) -> None:
        self.height = height
def get_height(node: MyNode | None) -> int:
    if node is None:
        return 0
    return node.get_height()
def my_max(a: int, b: int) -> int:
    if a > b:
        return a
    return b
def right_rotation(node: MyNode) -> MyNode:
    print("left rotation node:", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret
def left_rotation(node: MyNode) -> MyNode:
    print("right rotation node:", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret
def lr_rotation(node: MyNode) -> MyNode:
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)
def rl_rotation(node: MyNode) -> MyNode:
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)
def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    return node
def get_right_most(root: MyNode) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()
def get_left_most(root: MyNode) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()
def del_node(root: MyNode, data: Any) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))
    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    height = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(height)
    return root
class AVLtree:
    """An AVL tree."""
    def __init__(self) -> None:
        self.root: MyNode | None = None
    def get_height(self) -> int:
        return get_height(self.root)
    def insert(self, data: Any) -> None:
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)
    def del_node(self, data: Any) -> None:
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)
    def __str__(self) -> str:  # a level traversale, gives a more intuitive look on the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, i) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output
def _test() -> None:
    import doctest
    doctest.testmod()
if __name__ == "__main__":
_test()
t = AVLtree()
lst = list(range(10))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
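# Added example: inserting 0, 1, 2 in order triggers one rotation and yields height 2.
def _demo_avl_height() -> None:
    tree = AVLtree()
    for value in (0, 1, 2):
        tree.insert(value)
    assert tree.get_height() == 2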
| 221 | def sum_digits(num: int) -> int:
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum
def solution(max_n: int = 100) -> int:
    """Sum the digits of the numerator of the max_n-th convergent of e (Project Euler 65)."""
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)
if __name__ == "__main__":
print(F"{solution() = }")
| 221 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPixaPixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPixaPixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
lowerCAmelCase = PNDMScheduler(skip_prk_steps=_UpperCAmelCase )
torch.manual_seed(0 )
lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
lowerCAmelCase = CLIPTextModel(_UpperCAmelCase )
lowerCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCAmelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
'''simple docstring'''
lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase = Image.fromarray(np.uinta(_UpperCAmelCase ) ).convert('RGB' )
if str(_UpperCAmelCase ).startswith('mps' ):
lowerCAmelCase = torch.manual_seed(_UpperCAmelCase )
else:
lowerCAmelCase = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
lowerCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''image_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = StableDiffusionInstructPixaPixPipeline(**_UpperCAmelCase )
lowerCAmelCase = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowerCAmelCase = self.get_dummy_inputs(_UpperCAmelCase )
lowerCAmelCase = sd_pipe(**_UpperCAmelCase ).images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase = np.array([0.7_526, 0.3_750, 0.4_547, 0.6_117, 0.5_866, 0.5_016, 0.4_327, 0.5_642, 0.4_815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = StableDiffusionInstructPixaPixPipeline(**_UpperCAmelCase )
lowerCAmelCase = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowerCAmelCase = self.get_dummy_inputs(_UpperCAmelCase )
lowerCAmelCase = '''french fries'''
lowerCAmelCase = sd_pipe(**_UpperCAmelCase , negative_prompt=_UpperCAmelCase )
lowerCAmelCase = output.images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase = np.array([0.7_511, 0.3_642, 0.4_553, 0.6_236, 0.5_797, 0.5_013, 0.4_343, 0.5_611, 0.4_831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = StableDiffusionInstructPixaPixPipeline(**_UpperCAmelCase )
lowerCAmelCase = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowerCAmelCase = self.get_dummy_inputs(_UpperCAmelCase )
lowerCAmelCase = [inputs['''prompt''']] * 2
lowerCAmelCase = np.array(inputs['image'] ).astype(np.floataa ) / 2_55.0
lowerCAmelCase = torch.from_numpy(_UpperCAmelCase ).unsqueeze(0 ).to(_UpperCAmelCase )
lowerCAmelCase = image / 2 + 0.5
lowerCAmelCase = image.permute(0 , 3 , 1 , 2 )
lowerCAmelCase = image.repeat(2 , 1 , 1 , 1 )
lowerCAmelCase = sd_pipe(**_UpperCAmelCase ).images
lowerCAmelCase = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
lowerCAmelCase = np.array([0.5_812, 0.5_748, 0.5_222, 0.5_908, 0.5_695, 0.7_174, 0.6_804, 0.5_523, 0.5_579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = EulerAncestralDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' )
lowerCAmelCase = StableDiffusionInstructPixaPixPipeline(**_UpperCAmelCase )
lowerCAmelCase = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowerCAmelCase = self.get_dummy_inputs(_UpperCAmelCase )
lowerCAmelCase = sd_pipe(**_UpperCAmelCase ).images
lowerCAmelCase = image[0, -3:, -3:, -1]
lowerCAmelCase = [round(_UpperCAmelCase , 4 ) for x in image_slice.flatten().tolist()]
print(','.join([str(_UpperCAmelCase ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase = np.array([0.7_417, 0.3_842, 0.4_732, 0.5_776, 0.5_891, 0.5_139, 0.4_052, 0.5_673, 0.4_986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = StableDiffusionInstructPixaPixPipeline(**_UpperCAmelCase )
lowerCAmelCase = VaeImageProcessor(do_resize=_UpperCAmelCase , do_normalize=_UpperCAmelCase )
lowerCAmelCase = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowerCAmelCase = pipe(**self.get_dummy_inputs_by_type(_UpperCAmelCase , input_image_type='pt' ) )[0]
lowerCAmelCase = components['''vae''']
lowerCAmelCase = self.get_dummy_inputs_by_type(_UpperCAmelCase , input_image_type='pt' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
lowerCAmelCase = vae.encode(inputs[image_param] ).latent_dist.mode()
lowerCAmelCase = pipe(**_UpperCAmelCase )[0]
lowerCAmelCase = np.abs(out - out_latents_inputs ).max()
self.assertLess(_UpperCAmelCase , 1e-4 , 'passing latents as image input generate different result from passing image' )
@slow
@require_torch_gpu
class StableDiffusionInstructPixaPixPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs(self, seed=0):
'''simple docstring'''
lowerCAmelCase = torch.manual_seed(_UpperCAmelCase )
lowerCAmelCase = load_image(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' )
lowerCAmelCase = {
'''prompt''': '''turn him into a cyborg''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''image_guidance_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
lowerCAmelCase = self.get_inputs()
lowerCAmelCase = pipe(**_UpperCAmelCase ).images
lowerCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase = np.array([0.5_902, 0.6_015, 0.6_027, 0.5_983, 0.6_092, 0.6_061, 0.5_765, 0.5_785, 0.5_555] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=_UpperCAmelCase )
lowerCAmelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
lowerCAmelCase = self.get_inputs()
lowerCAmelCase = pipe(**_UpperCAmelCase ).images
lowerCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase = np.array([0.6_578, 0.6_817, 0.6_972, 0.6_761, 0.6_856, 0.6_916, 0.6_428, 0.6_516, 0.6_301] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=_UpperCAmelCase )
lowerCAmelCase = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
lowerCAmelCase = self.get_inputs()
lowerCAmelCase = pipe(**_UpperCAmelCase ).images
lowerCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase = np.array([0.3_828, 0.3_834, 0.3_818, 0.3_792, 0.3_865, 0.3_752, 0.3_792, 0.3_847, 0.3_753] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = 0
def callback_fn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> None:
lowerCAmelCase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowerCAmelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
lowerCAmelCase = latents[0, -3:, -3:, -1]
lowerCAmelCase = np.array([-0.2_463, -0.4_644, -0.9_756, 1.5_176, 1.4_414, 0.7_866, 0.9_897, 0.8_521, 0.7_983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
lowerCAmelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
lowerCAmelCase = latents[0, -3:, -3:, -1]
lowerCAmelCase = np.array([-0.2_644, -0.4_626, -0.9_653, 1.5_176, 1.4_551, 0.7_686, 0.9_805, 0.8_452, 0.8_115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
lowerCAmelCase = False
lowerCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=_UpperCAmelCase , torch_dtype=torch.floataa )
lowerCAmelCase = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
lowerCAmelCase = self.get_inputs()
pipe(**_UpperCAmelCase , callback=_UpperCAmelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=_UpperCAmelCase , torch_dtype=torch.floataa )
lowerCAmelCase = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowerCAmelCase = self.get_inputs()
lowerCAmelCase = pipe(**_UpperCAmelCase )
lowerCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
lowerCAmelCase = inputs['''image'''].resize((5_04, 5_04) )
lowerCAmelCase = '''timbrooks/instruct-pix2pix'''
lowerCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
_UpperCAmelCase , safety_checker=_UpperCAmelCase , )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
lowerCAmelCase = pipe(**_UpperCAmelCase )
lowerCAmelCase = output.images[0]
lowerCAmelCase = image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 5_04, 3)
lowerCAmelCase = np.array([0.2_726, 0.2_529, 0.2_664, 0.2_655, 0.2_641, 0.2_642, 0.2_591, 0.2_649, 0.2_590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
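# Added usage sketch (not executed; needs downloaded weights and ideally a GPU):
# end-to-end editing with the pipeline class imported above, mirroring the slow tests.
def _demo_instruct_pix2pix() -> None:
    pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
        'timbrooks/instruct-pix2pix', safety_checker=None)
    image = load_image(
        'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg')
    edited = pipe('turn him into a cyborg', image=image, num_inference_steps=3).images[0]
    assert edited.size == (512, 512)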
| 713 |
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_UpperCamelCase : List[Any] = "\\n\n"
_UpperCamelCase : List[Any] = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n"
_UpperCamelCase : Dict = "\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to 'cuda' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n >>> results = perplexity.compute(model_id='gpt2',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 78.22\n >>> print(round(results[\"perplexities\"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = datasets.load_dataset(\"wikitext\",\n ... \"wikitext-2-raw-v1\",\n ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!='']\n >>> results = perplexity.compute(model_id='gpt2',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 60.35\n >>> print(round(results[\"perplexities\"][0], 2))\n 81.12\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'input_texts': datasets.Value('string' ),
} ) , reference_urls=['https://huggingface.co/docs/transformers/perplexity'] , )
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = 'cuda'
        else:
            device = 'cuda' if torch.cuda.is_available() else 'cpu'
        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]})
        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length
        encodings = tokenizer(
            input_texts, add_special_tokens=False, padding=True, truncation=True, max_length=max_tokenized_len, return_tensors='pt', return_attention_mask=True, ).to(device)
        encoded_texts = encodings['input_ids']
        attn_masks = encodings['attention_mask']
        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
        ppls = []
        loss_fct = CrossEntropyLoss(reduction='none')
        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]
            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1)
            labels = encoded_batch
            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits
            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()
            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1))
            ppls += perplexity_batch.tolist()
        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 514 | 0 |
'''simple docstring'''
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    """Estimate pi by sampling `iterations` random points in the unit square."""
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1
    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations))
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f'The estimated value of pi is {pi_estimate}')
    print(f'The numpy value of pi is {pi}')
    print(f'The total error is {abs(pi - pi_estimate)}')
def area_under_curve_estimator(iterations: int, function_to_integrate: Callable[[float], float], min_value: float = 0.0, max_value: float = 1.0, ) -> float:
    """Monte Carlo estimate of the integral of function_to_integrate on [min_value, max_value]."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)) * (max_value - min_value)
def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    """Check the estimator against the exact area under y=x."""
    def identity_function(x: float) -> float:
        return x
    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("""******************""")
    print(f'Estimating area under y=x where x varies from {min_value} to {max_value}')
    print(f'Estimated value is {estimated_value}')
    print(f'Expected value is {expected_value}')
    print(f'Total error is {abs(estimated_value - expected_value)}')
    print("""******************""")
def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """Estimate pi as the area under sqrt(4 - x*x) on [0, 2]."""
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)
    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0)
    print("""******************""")
    print("""Estimating pi using area_under_curve_estimator""")
    print(f'Estimated value is {estimated_value}')
    print(f'Expected value is {pi}')
    print(f'Total error is {abs(estimated_value - pi)}')
    print("""******************""")
if __name__ == "__main__":
import doctest
doctest.testmod() | 433 |
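# Added note on the estimators above: Monte Carlo error shrinks like O(1/sqrt(n)),
# so 100x more samples buys roughly one extra decimal digit of accuracy.
def _demo_monte_carlo() -> None:
    pi_estimator(100_000)
    area_under_line_estimator_check(100_000)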
'''simple docstring'''
from math import factorial
def solution(num: int = 100) -> int:
    """Return the sum of the digits in num! (Project Euler 20)."""
    return sum(map(int, str(factorial(num))))
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip()))) | 433 | 1 |
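# Added check: 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27.
def _demo_factorial_digit_sum() -> None:
    assert solution(10) == 27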
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    """Wraps a TVLT image processor and a TVLT feature extractor into one processor."""
    attributes = ['image_processor', 'feature_extractor']
    image_processor_class = 'TvltImageProcessor'
    feature_extractor_class = 'TvltFeatureExtractor'
    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor
    def __call__(self, images=None, audio=None, images_mixed=None, sampling_rate=None, mask_audio=False, mask_pixel=False, *args, **kwargs, ):
        if images is None and audio is None:
            raise ValueError('''You need to specify either an `images` or `audio` input to process.''')
        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs)
        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict
    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
| 318 |
'''simple docstring'''
def binary_and(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative integers as a binary string ('0b...')."""
    if a < 0 or b < 0:
        raise ValueError('''the value of both inputs must be positive''')
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == '''1''' and char_b == '''1'''))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)))
if __name__ == "__main__":
import doctest
doctest.testmod()
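# Added examples for binary_and(): operands are zero-filled to a common width first.
def _demo_binary_and() -> None:
    assert binary_and(25, 32) == "0b000000"
    assert binary_and(37, 50) == "0b100000"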
| 318 | 1 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none.")
        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))
                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")
            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp
                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch
                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf
                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")
            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask
        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs).pixel_values
            encoding["query_pixel_values"] = query_pixel_values
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)
    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)
    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, )
        return self.image_processor_class
    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, )
        return self.image_processor
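# Added usage sketch (needs network access to fetch the checkpoint; illustrative only):
def _demo_owlvit_processor() -> None:
    from PIL import Image
    processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
    image = Image.new("RGB", (224, 224))
    inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="np")
    assert "input_ids" in inputs and "pixel_values" in inputs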
| 520 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the launcher's own arguments plus everything meant for the wrapped script."""
    parser = ArgumentParser(
        description=(
            'PyTorch TPU distributed training launch '
            'helper utility that will spawn up '
            'multiple distributed processes'
        ) )
    # Optional arguments for the launch helper
    parser.add_argument('--num_cores', type=int, default=1, help='Number of TPU cores to use (1 or 8).' )
    # positional
    parser.add_argument(
        'training_script', type=str, help=(
            'The full path to the single TPU training '
            'program/script to be launched in parallel, '
            'followed by all the arguments for the '
            'training script'
        ), )
    # rest from the training program
    parser.add_argument('training_script_args', nargs=REMAINDER )
    return parser.parse_args()
def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores )
if __name__ == "__main__":
main()
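# Added usage note (script names are illustrative):
#   python xla_spawn.py --num_cores 8 train_script.py --learning_rate 3e-5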
| 53 | 0 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
a : Optional[Any] = logging.get_logger(__name__)
class lowerCamelCase_ ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase = ["input_features", "is_longer"]
    def __init__( self , feature_size=64 , sampling_rate=48_000 , hop_length=480 , max_length_s=10 , fft_window_size=1024 , padding_value=0.0 , return_attention_mask=False , frequency_min = 0 , frequency_max = 14_000 , top_db = None , truncation = "fusion" , padding = "repeatpad" , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , return_attention_mask=return_attention_mask , **kwargs , )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=feature_size , min_frequency=frequency_min , max_frequency=frequency_max , sampling_rate=sampling_rate , norm=None , mel_scale='''htk''' , )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=feature_size , min_frequency=frequency_min , max_frequency=frequency_max , sampling_rate=sampling_rate , norm='''slaney''' , mel_scale='''slaney''' , )
    def to_dict( self ) -> Dict[str, Any]:
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output['''feature_extractor_type'''] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output
    def _np_extract_fbank_features( self , waveform , mel_filters = None ) -> np.ndarray:
        '''simple docstring'''
        log_mel_spectrogram = spectrogram(
            waveform , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=mel_filters , log_mel='''dB''' , )
        return log_mel_spectrogram.T
    def _random_mel_fusion( self , mel , total_frames , chunk_frames ):
        '''simple docstring'''
        ranges = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
        if len(ranges[1] ) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2] ) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0] )
        idx_middle = np.random.choice(ranges[1] )
        idx_back = np.random.choice(ranges[2] )
        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
        mel_shrink = torch.tensor(mel[None, None, :] )
        mel_shrink = torch.nn.functional.interpolate(
            mel_shrink , size=[chunk_frames, 64] , mode='''bilinear''' , align_corners=False )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
        return mel_fusion
    def _get_input_mel( self , waveform , max_length , truncation , padding ):
        '''simple docstring'''
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform ) - max_length
                idx = np.random.randint(0 , overflow + 1 )
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform , self.mel_filters_slaney )[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform , self.mel_filters )
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel] , axis=0 )
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel , total_frames , chunk_frames )
                    longer = True
            else:
                raise NotImplementedError(F'data_truncating {truncation} not implemented' )
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform ) )
                    waveform = np.stack(np.tile(waveform , n_repeat + 1 ) )[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform ) )
                    waveform = np.stack(np.tile(waveform , n_repeat ) )
                waveform = np.pad(waveform , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 )
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform , self.mel_filters )
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
            else:
                input_mel = self._np_extract_fbank_features(waveform , self.mel_filters_slaney )[None, :]
        return input_mel, longer
    def __call__( self , raw_speech , truncation = None , padding = None , max_length = None , sampling_rate = None , return_tensors = None , **kwargs , ) -> BatchFeature:
        '''simple docstring'''
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
                    F' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
                    F' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''' )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F'Only mono-channel audio is supported for input to {self}' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray(speech , dtype=np.float64 ) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float64 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float32 ):
            raw_speech = raw_speech.astype(np.float64 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech )]
        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform , max_length if max_length else self.nb_max_samples , truncation , padding )
            for waveform in raw_speech
        ]
        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel )
            is_longer.append(longer )
        if truncation == "fusion" and sum(is_longer ) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0 , len(input_mel ) )
            is_longer[rand_idx] = True
        if isinstance(input_mel[0] , list ):
            input_mel = [np.asarray(feature , dtype=np.float64 ) for feature in input_mel]
        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]
        input_features = {'''input_features''': input_mel, '''is_longer''': is_longer}
        input_features = BatchFeature(input_features )
        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors )
        return input_features
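A minimal usage sketch for the extractor above; one second of random mono audio stands in for real input, and the printed shapes are indicative for the default "fusion"/"repeatpad" settings (the module's relative imports mean this only runs inside the transformers package):

extractor = ClapFeatureExtractor()  # defaults match the signature above
audio = np.random.randn(48_000).astype(np.float64)  # 1 s at 48 kHz
features = extractor(audio, sampling_rate=48_000, return_tensors="np")
print(features["input_features"].shape)  # roughly (1, 4, frames, 64): the fused log-mel stack
print(features["is_longer"])             # [[False]] for clips shorter than max_length_s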
| 527 |
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__( self , a=2 , b=3 , length=64 , seed=None ):
        rng = np.random.default_rng(seed )
        self.length = length
        self.x = rng.normal(size=(length,) ).astype(np.float32 )
        self.y = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.float32 )

    def __len__( self ):
        return self.length

    def __getitem__( self , i ):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module ):
    def __init__( self , a=0 , b=0 , double_output=False ):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
        self.b = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
        self.first_batch = True

    def forward( self , x=None ):
        if self.first_batch:
            print(F'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module ):
    def __init__( self , a=0 , b=0 , double_output=False ):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a ).float() )
        self.b = torch.nn.Parameter(torch.tensor(b ).float() )
        self.first_batch = True

    def forward( self , x=None ):
        if self.first_batch:
            print(F'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
            self.first_batch = False
        return x * self.a + self.b
def mocked_dataloaders(accelerator, batch_size=16):
    '''simple docstring'''
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    data_files = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''}
    datasets = load_dataset('''csv''' , data_files=data_files )
    label_list = datasets['''train'''].unique('''label''' )
    label_to_id = {v: i for i, v in enumerate(label_list )}

    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None , padding='''max_length''' )
        if "label" in examples:
            outputs['''labels'''] = [label_to_id[l] for l in examples['''label''']]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , )

    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
        return tokenizer.pad(examples , padding='''longest''' , return_tensors='''pt''' )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=2 )
    eval_dataloader = DataLoader(tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=1 )

    return train_dataloader, eval_dataloader
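A minimal sketch of a training loop over the synthetic regression data above, using the classes as named here (purely illustrative):

if __name__ == "__main__":
    dataset = RegressionDataset(length=64, seed=42)
    loader = DataLoader(dataset, batch_size=16, shuffle=True)
    model = RegressionModel(a=0, b=0)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.05)
    for batch in loader:
        optimizer.zero_grad()
        # the dataset generates y = a * x + b + noise, so MSE regression recovers a and b
        loss = torch.nn.functional.mse_loss(model(batch["x"]), batch["y"])
        loss.backward()
        optimizer.step()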
| 527 | 1 |
"""simple docstring"""
import operator as op
def lowercase__ ( snake_case_ :Union[str, Any] ):
__UpperCAmelCase = []
__UpperCAmelCase = lambda snake_case_ , snake_case_ : int(x / y ) # noqa: E731 integer division operation
__UpperCAmelCase = {
'''^''': op.pow,
'''*''': op.mul,
'''/''': div,
'''+''': op.add,
'''-''': op.sub,
} # operators & their respective operation
# print table header
print('''Symbol'''.center(8 ) , '''Action'''.center(12 ) , '''Stack''' , sep=''' | ''' )
print('''-''' * (30 + len(snake_case_ )) )
for x in post_fix:
if x.isdigit(): # if x in digit
stack.append(snake_case_ ) # append x to stack
# output in tabular format
print(x.rjust(8 ) , ('''push(''' + x + ''')''').ljust(12 ) , ''','''.join(snake_case_ ) , sep=''' | ''' )
else:
__UpperCAmelCase = stack.pop() # pop stack
# output in tabular format
print(''''''.rjust(8 ) , ('''pop(''' + b + ''')''').ljust(12 ) , ''','''.join(snake_case_ ) , sep=''' | ''' )
__UpperCAmelCase = stack.pop() # pop stack
# output in tabular format
print(''''''.rjust(8 ) , ('''pop(''' + a + ''')''').ljust(12 ) , ''','''.join(snake_case_ ) , sep=''' | ''' )
stack.append(
str(opr[x](int(snake_case_ ) , int(snake_case_ ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8 ) , ('''push(''' + a + x + b + ''')''').ljust(12 ) , ''','''.join(snake_case_ ) , sep=''' | ''' , )
return int(stack[0] )
if __name__ == "__main__":
_lowercase : Any = input('\n\nEnter a Postfix Equation (space separated) = ').split(' ')
print('\n\tResult = ', solve(Postfix))
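A quick worked check of the evaluator above: for `5 6 9 * +`, the `*` pops 6 and 9 and pushes 54, then `+` pops 5 and 54 and pushes 59.

# Doctest-style example (the call also prints the trace table):
#     >>> solve("5 6 9 * +".split(" "))
#     59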
| 49 |
"""simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum ):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS )
class TextGenerationPipeline(Pipeline ):
    XL_PREFIX = '''
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
'''
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix , **self._forward_params )
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters( self , return_full_text=None , return_tensors=None , return_text=None , return_type=None , clean_up_tokenization_spaces=None , prefix=None , handle_long_generation=None , stop_sequence=None , **generate_kwargs , ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix , padding=False , add_special_tokens=False , return_tensors=self.framework )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]
        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"""
                    " [None, 'hole']" )
            preprocess_params["handle_long_generation"] = handle_long_generation
        preprocess_params.update(generate_kwargs )
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`" )
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`" )
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`" )
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence , add_special_tokens=False )
            if len(stop_sequence_ids ) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim." )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize( self , *args , **kwargs ):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True} )
        return super()._parse_and_tokenize(*args , **kwargs )
    def __call__( self , text_inputs , **kwargs ):
        return super().__call__(text_inputs , **kwargs )
    def preprocess( self , prompt_text , prefix="" , handle_long_generation=None , **generate_kwargs ):
        inputs = self.tokenizer(
            prefix + prompt_text , padding=False , add_special_tokens=False , return_tensors=self.framework )
        inputs["prompt_text"] = prompt_text
        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length" , self.model.config.max_length ) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected" )
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length" )
                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]
        return inputs
    def _forward( self , model_inputs , **generate_kwargs ):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask" , None )
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text" )
        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length" , 0 )
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length" ) or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length
        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids , attention_mask=attention_mask , **generate_kwargs )
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b , out_b // in_b , *generated_sequence.shape[1:] )
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess( self , model_outputs , return_type=ReturnType.FULL_TEXT , clean_up_tokenization_spaces=True ):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , )
                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0] , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , ) )
                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]
                record = {"generated_text": all_text}
            records.append(record )
        return records
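A minimal usage sketch for the pipeline above through the standard factory, assuming the usual "text-generation" task registration; gpt2 is used only as a small, widely available example checkpoint:

from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")
outputs = generator("Hello, I'm a language model,", max_new_tokens=20, num_return_sequences=1)
print(outputs[0]["generated_text"])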
| 95 | 0 |
"""simple docstring"""
from ...utils import logging
from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


class TFMT5Model(TFT5Model ):
    '''simple docstring'''

    model_type = "mt5"
    config_class = MT5Config


class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration ):
    '''simple docstring'''

    model_type = "mt5"
    config_class = MT5Config


class TFMT5EncoderModel(TFT5EncoderModel ):
    '''simple docstring'''

    model_type = "mt5"
    config_class = MT5Config
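A minimal usage sketch for the TF MT5 wrappers above, via their public transformers names; google/mt5-small is a raw pretrained checkpoint, so the generated text is illustrative only:

from transformers import AutoTokenizer, TFMT5ForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
model = TFMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
inputs = tokenizer("summarize: Hello world", return_tensors="tf")
generated = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(generated[0], skip_special_tokens=True))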
| 700 |
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(x ):
    """simple docstring"""
    if isinstance(x , collections.abc.Iterable ):
        return x
    return (x, x)
@require_flax
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def snake_case ( self : Dict, lowerCamelCase : List[str], lowerCamelCase : Any )-> Union[str, Any]:
pass
def snake_case ( self : List[str] )-> List[str]:
pass
def snake_case ( self : Optional[Any] )-> str:
pass
    def assert_almost_equals( self, a: np.ndarray, b: np.ndarray, tol: float )-> None:
        diff = np.abs((a - b) ).max()
        self.assertLessEqual(diff, tol, F'''Difference between torch and flax is {diff} (>= {tol}).''' )
def snake_case ( self : Dict, lowerCamelCase : Tuple, lowerCamelCase : Any, lowerCamelCase : List[str], lowerCamelCase : Dict, lowerCamelCase : Any=None, **lowerCamelCase : str )-> int:
lowerCamelCase__ : List[str] =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : Tuple =FlaxVisionTextDualEncoderModel(lowerCamelCase )
lowerCamelCase__ : Dict =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase )
self.assertEqual(output['''text_embeds'''].shape, (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape, (pixel_values.shape[0], config.projection_dim) )
def snake_case ( self : Any, lowerCamelCase : int, lowerCamelCase : Optional[Any], lowerCamelCase : List[str], lowerCamelCase : Union[str, Any], lowerCamelCase : str=None, **lowerCamelCase : List[Any] )-> int:
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : Union[str, Any] ={'''vision_model''': vision_model, '''text_model''': text_model}
lowerCamelCase__ : Tuple =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase )
lowerCamelCase__ : Optional[int] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase )
self.assertEqual(output['''text_embeds'''].shape, (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape, (pixel_values.shape[0], model.config.projection_dim) )
def snake_case ( self : Any, lowerCamelCase : Dict, lowerCamelCase : Dict, lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : Dict=None, **lowerCamelCase : int )-> List[str]:
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : Optional[int] ={'''vision_model''': vision_model, '''text_model''': text_model}
lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase )
lowerCamelCase__ : List[Any] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase )
lowerCamelCase__ : int =output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase )
lowerCamelCase__ : Dict =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase )
lowerCamelCase__ : Optional[int] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase )
lowerCamelCase__ : List[str] =after_output[0]
lowerCamelCase__ : Any =np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCamelCase, 1E-3 )
def snake_case ( self : Optional[Any], lowerCamelCase : Dict, lowerCamelCase : str, lowerCamelCase : str, lowerCamelCase : str, lowerCamelCase : List[Any]=None, **lowerCamelCase : List[Any] )-> Tuple:
lowerCamelCase__ , lowerCamelCase__ : Optional[int] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : Any ={'''vision_model''': vision_model, '''text_model''': text_model}
lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase )
lowerCamelCase__ : List[str] =model(
input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase, output_attentions=lowerCamelCase )
lowerCamelCase__ : int =output.vision_model_output.attentions
self.assertEqual(len(lowerCamelCase ), vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCamelCase__ : Tuple =to_atuple(vision_model.config.image_size )
lowerCamelCase__ : Optional[Any] =to_atuple(vision_model.config.patch_size )
lowerCamelCase__ : Union[str, Any] =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowerCamelCase__ : int =num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len) )
lowerCamelCase__ : List[Any] =output.text_model_output.attentions
self.assertEqual(len(lowerCamelCase ), text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), )
def snake_case ( self : Tuple, lowerCamelCase : Optional[int], lowerCamelCase : Any, lowerCamelCase : Union[str, Any] )-> Any:
pt_model.to(lowerCamelCase )
pt_model.eval()
# prepare inputs
lowerCamelCase__ : Any =inputs_dict
lowerCamelCase__ : Any ={k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
lowerCamelCase__ : List[str] =pt_model(**lowerCamelCase ).to_tuple()
lowerCamelCase__ : Optional[Any] =fx_model(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4] ):
self.assert_almost_equals(lowerCamelCase, pt_output.numpy(), 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCamelCase )
lowerCamelCase__ : Optional[int] =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase, from_pt=lowerCamelCase )
lowerCamelCase__ : List[Any] =fx_model_loaded(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4] ):
self.assert_almost_equals(lowerCamelCase, pt_output.numpy(), 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCamelCase )
lowerCamelCase__ : str =VisionTextDualEncoderModel.from_pretrained(lowerCamelCase, from_flax=lowerCamelCase )
pt_model_loaded.to(lowerCamelCase )
pt_model_loaded.eval()
with torch.no_grad():
lowerCamelCase__ : List[Any] =pt_model_loaded(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4] ):
self.assert_almost_equals(lowerCamelCase, pt_output_loaded.numpy(), 4E-2 )
def snake_case ( self : str, lowerCamelCase : Union[str, Any], lowerCamelCase : Optional[Any], lowerCamelCase : str )-> List[Any]:
lowerCamelCase__ : Any =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : List[Any] =VisionTextDualEncoderModel(lowerCamelCase )
lowerCamelCase__ : List[str] =FlaxVisionTextDualEncoderModel(lowerCamelCase )
lowerCamelCase__ : str =convert_pytorch_state_dict_to_flax(pt_model.state_dict(), lowerCamelCase )
lowerCamelCase__ : Tuple =fx_state
self.check_pt_flax_equivalence(lowerCamelCase, lowerCamelCase, lowerCamelCase )
def snake_case ( self : List[Any], lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : Union[str, Any] )-> Optional[int]:
lowerCamelCase__ : Dict =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : Tuple =VisionTextDualEncoderModel(lowerCamelCase )
lowerCamelCase__ : List[str] =FlaxVisionTextDualEncoderModel(lowerCamelCase )
lowerCamelCase__ : Tuple =load_flax_weights_in_pytorch_model(lowerCamelCase, fx_model.params )
self.check_pt_flax_equivalence(lowerCamelCase, lowerCamelCase, lowerCamelCase )
def snake_case ( self : Optional[int] )-> Union[str, Any]:
lowerCamelCase__ : Any =self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**lowerCamelCase )
def snake_case ( self : Tuple )-> int:
lowerCamelCase__ : int =self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**lowerCamelCase )
def snake_case ( self : Tuple )-> Any:
lowerCamelCase__ : Tuple =self.prepare_config_and_inputs()
self.check_save_load(**lowerCamelCase )
def snake_case ( self : str )-> Any:
lowerCamelCase__ : str =self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**lowerCamelCase )
@is_pt_flax_cross_test
def snake_case ( self : Tuple )-> List[Any]:
lowerCamelCase__ : Union[str, Any] =self.prepare_config_and_inputs()
lowerCamelCase__ : Union[str, Any] =config_inputs_dict.pop('''vision_config''' )
lowerCamelCase__ : Optional[Any] =config_inputs_dict.pop('''text_config''' )
lowerCamelCase__ : Tuple =config_inputs_dict
self.check_equivalence_pt_to_flax(lowerCamelCase, lowerCamelCase, lowerCamelCase )
self.check_equivalence_flax_to_pt(lowerCamelCase, lowerCamelCase, lowerCamelCase )
@slow
def snake_case ( self : Optional[Any] )-> Tuple:
lowerCamelCase__ , lowerCamelCase__ : Dict =self.get_pretrained_model_and_inputs()
lowerCamelCase__ : Optional[int] =model_a(**lowerCamelCase )
lowerCamelCase__ : List[str] =outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(lowerCamelCase )
lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase )
lowerCamelCase__ : Union[str, Any] =model_a(**lowerCamelCase )
lowerCamelCase__ : List[Any] =after_outputs[0]
lowerCamelCase__ : Any =np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCamelCase, 1E-5 )
@require_flax
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
def snake_case ( self : Optional[int] )-> Optional[Any]:
lowerCamelCase__ : str =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-vit''', '''hf-internal-testing/tiny-bert''', vision_from_pt=lowerCamelCase, text_from_pt=lowerCamelCase, )
lowerCamelCase__ : Union[str, Any] =13
lowerCamelCase__ : List[str] =floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
lowerCamelCase__ : List[str] =ids_tensor([batch_size, 4], model.config.text_config.vocab_size )
lowerCamelCase__ : Optional[int] =random_attention_mask([batch_size, 4] )
lowerCamelCase__ : Any ={'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def snake_case ( self : str, lowerCamelCase : str, lowerCamelCase : int )-> int:
lowerCamelCase__ : str =FlaxViTModel(lowerCamelCase )
lowerCamelCase__ : Any =FlaxBertModel(lowerCamelCase )
return vision_model, text_model
def snake_case ( self : int )-> Optional[int]:
lowerCamelCase__ : Any =FlaxViTModelTester(self )
lowerCamelCase__ : Union[str, Any] =FlaxBertModelTester(self )
lowerCamelCase__ : Any =vit_model_tester.prepare_config_and_inputs()
lowerCamelCase__ : Optional[Any] =bert_model_tester.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ : Any =vision_config_and_inputs
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple =text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
def snake_case ( self : Optional[int] )-> Optional[int]:
lowerCamelCase__ : Union[str, Any] =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-clip''', '''hf-internal-testing/tiny-bert''', vision_from_pt=lowerCamelCase, text_from_pt=lowerCamelCase, )
lowerCamelCase__ : Union[str, Any] =13
lowerCamelCase__ : Optional[Any] =floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
lowerCamelCase__ : Union[str, Any] =ids_tensor([batch_size, 4], model.config.text_config.vocab_size )
lowerCamelCase__ : str =random_attention_mask([batch_size, 4] )
lowerCamelCase__ : Optional[int] ={'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def snake_case ( self : List[str], lowerCamelCase : Any, lowerCamelCase : Dict )-> Dict:
lowerCamelCase__ : str =FlaxCLIPVisionModel(lowerCamelCase )
lowerCamelCase__ : Optional[Any] =FlaxBertModel(lowerCamelCase )
return vision_model, text_model
def snake_case ( self : Optional[int] )-> Optional[Any]:
lowerCamelCase__ : List[Any] =FlaxCLIPVisionModelTester(self )
lowerCamelCase__ : List[Any] =FlaxBertModelTester(self )
lowerCamelCase__ : Any =clip_model_tester.prepare_config_and_inputs()
lowerCamelCase__ : Optional[int] =bert_model_tester.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ : List[Any] =vision_config_and_inputs
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] =text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@slow
def snake_case ( self : Tuple )-> Optional[Any]:
lowerCamelCase__ : Any =FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''', logit_scale_init_value=1.0 )
lowerCamelCase__ : List[Any] =VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' )
lowerCamelCase__ : Optional[int] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase__ : Dict =processor(
text=['''una foto di un gatto''', '''una foto di un cane'''], images=lowerCamelCase, padding=lowerCamelCase, return_tensors='''np''' )
lowerCamelCase__ : List[Any] =model(**lowerCamelCase )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]), )
lowerCamelCase__ : Any =np.array([[1.2_284_727, 0.3_104_122]] )
self.assertTrue(np.allclose(outputs.logits_per_image, lowerCamelCase, atol=1E-3 ) )
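A minimal construction sketch for the dual-encoder setup these tests exercise, assuming the public from_vision_text_pretrained API and standard ViT/BERT checkpoints:

from transformers import (
    AutoImageProcessor,
    AutoTokenizer,
    FlaxVisionTextDualEncoderModel,
    VisionTextDualEncoderProcessor,
)

model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
    "google/vit-base-patch16-224", "bert-base-uncased"
)
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
processor = VisionTextDualEncoderProcessor(image_processor, tokenizer)
# The projection layers are randomly initialized, so the model needs contrastive
# fine-tuning before its similarity logits are meaningful.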
| 625 | 0 |
"""simple docstring"""
import unittest
from transformers import DonutProcessor
DONUT_PRETRAINED_MODEL_NAME = "naver-clova-ix/donut-base"


class DonutProcessorTest(unittest.TestCase ):
    def setUp( self ) -> None:
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME )

    def test_token2json( self ) -> None:
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }
        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )
        actual_json = self.processor.token2json(sequence )
        self.assertDictEqual(actual_json , expected_json )
| 231 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
        '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json''',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig ):
    model_type = "realm"
    def __init__( self , vocab_size=30_522 , hidden_size=768 , retriever_proj_size=128 , num_hidden_layers=12 , num_attention_heads=12 , num_candidates=8 , intermediate_size=3072 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , span_hidden_size=256 , max_span_width=10 , reader_layer_norm_eps=1e-3 , reader_beam_size=5 , reader_seq_len=320 , num_block_records=13_353_718 , searcher_beam_size=5000 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
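A minimal usage sketch for the configuration above, using the standard PretrainedConfig round-trip:

config = RealmConfig(num_candidates=4)
print(config.model_type, config.num_candidates)  # realm 4
restored = RealmConfig.from_dict(config.to_dict())
assert restored.num_candidates == 4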
| 231 | 1 |
"""simple docstring"""
import unittest
from transformers import DonutProcessor
A = 'naver-clova-ix/donut-base'
class UpperCAmelCase__ ( unittest.TestCase ):
    def setUp( self ) -> None:
        '''simple docstring'''
        self.processor = DonutProcessor.from_pretrained(A )

    def test_token2json( self ) -> None:
        '''simple docstring'''
        expected_json = {
            'name': 'John Doe',
            'age': '99',
            'city': 'Atlanta',
            'state': 'GA',
            'zip': '30301',
            'phone': '123-4567',
            'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
        }
        sequence = (
            '<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
            '<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
            '<s_nicknames><s_nickname>Johnny</s_nickname>'
            '<sep/><s_nickname>JD</s_nickname></s_nicknames>'
        )
        actual_json = self.processor.token2json(sequence )
        self.assertDictEqual(actual_json , expected_json )
| 109 |
"""simple docstring"""
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
    },
    'emoji_file': {
        'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'abeja/gpt-neox-japanese-2.7b': 2048,
}
def load_vocab_and_emoji(vocab_file , emoji_file ):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file , 'r' , encoding='utf-8' ) as f:
        emoji = json.loads(f.read() )

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file , 'r' , encoding='utf-8' ) as f:
        token = f.readlines()
    token = [[t.rstrip('\n' )] if (t == ',' or ',' not in t) else t.rstrip('\n' ).split(',' ) for t in token]
    for idx, b in enumerate(token ):
        ids_to_tokens[idx] = b
        raw_vocab[','.join(b )] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , emoji_file , unk_token="<|endoftext|>" , pad_token="<|endoftext|>" , bos_token="<|startoftext|>" , eos_token="<|endoftext|>" , do_clean_text=False , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(
            unk_token=unk_token , pad_token=pad_token , bos_token=bos_token , eos_token=eos_token , do_clean_text=do_clean_text , **kwargs , )
        if not os.path.isfile(vocab_file ):
            raise ValueError(
                f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
                ' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
        if not os.path.isfile(emoji_file ):
            raise ValueError(
                f"""Can't find an emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
                ' pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
        self.do_clean_text = do_clean_text
        self.vocab , self.raw_vocab , self.ids_to_tokens , self.emoji = load_vocab_and_emoji(vocab_file , emoji_file )
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
    def vocab_size( self ) -> int:
'''simple docstring'''
return len(self.raw_vocab )
    def get_vocab( self ) -> dict:
'''simple docstring'''
return dict(self.raw_vocab , **self.added_tokens_encoder )
    def _tokenize( self , snake_case ):
'''simple docstring'''
return self.subword_tokenizer.tokenize(snake_case , clean=self.do_clean_text )
    def _convert_token_to_id( self , snake_case ):
'''simple docstring'''
return self.vocab.get(snake_case , self.vocab.get(self.unk_token ) )
    def _convert_id_to_token( self , snake_case ):
'''simple docstring'''
return self.subword_tokenizer.convert_id_to_token(snake_case )
    def convert_tokens_to_string( self , tokens ) -> str:
        '''simple docstring'''
        out_string = ''.join(tokens ).strip()
        return out_string
def A_ ( self : Optional[int] , snake_case : "Conversation" ) -> List[int]:
'''simple docstring'''
A = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(snake_case , add_special_tokens=snake_case ) + [self.eos_token_id] )
if len(snake_case ) > self.model_max_length:
A = input_ids[-self.model_max_length :]
return input_ids
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
            emoji_file = os.path.join(
                save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] )
        else:
            vocab_file = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
            )
            emoji_file = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
            )
        with open(vocab_file , 'w' , encoding='utf-8' ) as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        ' Please check that the vocabulary is not corrupted!' )
                    index = token_index
                writer.write(','.join(token ) + '\n' )
                index += 1
        with open(emoji_file , 'w' , encoding='utf-8' ) as writer:
            json.dump(self.emoji , writer )
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object ):
    def __init__( self , vocab , ids_to_tokens , emoji ):
        '''simple docstring'''
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w ) for w in self.vocab.keys()] )
        self.content_repatter1 = re.compile(r'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' )
        self.content_repatter2 = re.compile(r'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' )
        self.content_repatter3 = re.compile(r'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' )
        self.content_repatter4 = re.compile(
            r'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
        self.content_repatter5 = re.compile(
            r'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
        self.content_repatter6 = re.compile(
            r'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' )
        keisen = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
        blocks = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
        self.content_trans1 = str.maketrans({k: '<BLOCK>' for k in keisen + blocks} )
def __len__( self : List[str] ) -> List[str]:
'''simple docstring'''
return len(self.ids_to_tokens )
    def clean_text( self , content ):
        '''simple docstring'''
        content = self.content_repatter1.sub('<URL>' , content )
        content = self.content_repatter2.sub('<EMAIL>' , content )
        content = self.content_repatter3.sub('<TEL>' , content )
        content = self.content_repatter4.sub('<DATE>' , content )
        content = self.content_repatter5.sub('<DATE>' , content )
        content = self.content_repatter6.sub('<PRICE>' , content )
        content = content.translate(self.content_trans1 )
        while "<BLOCK><BLOCK>" in content:
            content = content.replace('<BLOCK><BLOCK>' , '<BLOCK>' )
        return content
    def tokenize( self , text , clean=False ):
        '''simple docstring'''
        text = text.replace(' ' , '<SP>' )
        text = text.replace(' ' , '<SP>' )
        text = text.replace('\r\n' , '<BR>' )
        text = text.replace('\n' , '<BR>' )
        text = text.replace('\r' , '<BR>' )
        text = text.replace('\t' , '<TAB>' )
        text = text.replace('—' , 'ー' )
        text = text.replace('−' , 'ー' )
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k , v )
        if clean:
            text = self.clean_text(text )

        def check_simbol(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 2:
                c = (int(e[0] ) << 8) + int(e[1] )
                if (
                    (c >= 0xc2a1 and c <= 0xc2bf)
                    or (c >= 0xc780 and c <= 0xc783)
                    or (c >= 0xcab9 and c <= 0xcbbf)
                    or (c >= 0xcc80 and c <= 0xcda2)
                ):
                    return True
            return False

        def checku2e(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 3:
                c = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
                if c >= 0xe2_8080 and c <= 0xe2_b07f:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text ):
            end = min(len(text ) , pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end , pos , -1 ):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd ) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e) )
            if len(candidates ) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates , key=lambda x: x[0] )[0]
                result.append(wd )
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd ):
                    result.append('<KIGOU>' )
                elif checku2e(wd ):
                    result.append('<U2000U2BFF>' )
                else:
                    for i in wd.encode('utf-8' ):
                        result.append('<|byte%d|>' % i )
                pos = end
        return result
    def convert_id_to_token( self , index , breakline="\n" ):
        '''simple docstring'''
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2] ) )
        else:
            if len(byte_tokens ) > 0:
                words.append(bytearray(byte_tokens ).decode('utf-8' , errors='replace' ) )
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji['emoji_inv'][word] )
            elif word == "<SP>":
                words.append(' ' )
            elif word == "<BR>":
                words.append(breakline )
            elif word == "<TAB>":
                words.append('\t' )
            elif word == "<BLOCK>":
                words.append('▀' )
            elif word == "<KIGOU>":
                words.append('ǀ' )
            elif word == "<U2000U2BFF>":
                words.append('‖' )
            else:
                words.append(word )
        if len(byte_tokens ) > 0:
            words.append(bytearray(byte_tokens ).decode('utf-8' , errors='replace' ) )
        text = ''.join(words )
        return text
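A minimal usage sketch for the tokenizer above, assuming the public abeja/gpt-neox-japanese-2.7b checkpoint:

from transformers import GPTNeoXJapaneseTokenizer

tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
ids = tokenizer("こんにちは、世界。").input_ids
print(ids)
print(tokenizer.decode(ids))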
| 109 | 1 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
class BlenderbotTokenizerFast(PreTrainedTokenizerFast ):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('type' ) )
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space

        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )

            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'] )
            if "cls" in state:
                state['cls'] = tuple(state['cls'] )

            changes_to_apply = False

            if state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True

            if state.get('trim_offsets' , trim_offsets ) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors , state.pop('type' ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
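# --- Illustrative usage (not part of the original module) ---
# A minimal round trip with the fast tokenizer; assumes network access to the
# `facebook/blenderbot-3B` checkpoint referenced above:
#
#   tokenizer = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
#   ids = tokenizer("Hello, how are you?")["input_ids"]
#   print(tokenizer.decode(ids))  # ends with </s>, added by build_inputs_with_special_tokens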
| 93 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]
    def __init__(self, **kwargs):
        # Map legacy `no_xxx` flags onto their positive counterparts before the
        # dataclass machinery sees them.
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)
    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )
    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
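# --- Illustrative usage (not part of the original module) ---
# These arguments are consumed by `PyTorchBenchmark`; the model and batch
# choices below are placeholders:
#
#   from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
#   benchmark_args = PyTorchBenchmarkArguments(
#       models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128]
#   )
#   results = PyTorchBenchmark(benchmark_args).run()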
import math
def proth(number: int) -> int:
    """Return the ``number``-th Proth number (the sequence starts 3, 5, 9, 13, 17, 25, ...)."""
    if not isinstance(number, int):
        message = f"Input value of [number={number}] must be an integer"
        raise TypeError(message)
    if number < 1:
        message = f"Input value of [number={number}] must be > 0"
        raise ValueError(message)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Each "block" doubles the count of new Proth numbers sharing the same power of two.
        block_index = int(math.log(number // 3, 2)) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2
        return proth_list[number - 1]
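# Sanity check (values verified by hand): the Proth sequence begins
# 3, 5, 9, 13, 17, 25, ..., so proth(3) == 9 and proth(6) == 25.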
if __name__ == "__main__":
import doctest
doctest.testmod()
    for number in range(11):
        value = 0
        try:
            value = proth(number)
except ValueError:
print(F"ValueError: there is no {number}th Proth number")
continue
print(F"The {number}th Proth number: {value}")
| 701 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=100, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
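# --- Illustrative usage (not part of the original module) ---
# Building a randomly initialised model from this configuration; the model
# class is assumed to be importable from `transformers`:
#
#   from transformers import TableTransformerConfig, TableTransformerModel
#   configuration = TableTransformerConfig(num_queries=50)
#   model = TableTransformerModel(configuration)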
| 451 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"
class CamembertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"], sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
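# --- Illustrative usage (not part of the original module) ---
# A minimal encode/decode round trip; assumes the `camembert-base` checkpoint:
#
#   tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
#   ids = tokenizer.encode("J'aime le camembert !")
#   print(tokenizer.decode(ids, skip_special_tokens=True))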
| 692 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
        "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoFormerForCausalLM",
        "RoFormerForMaskedLM",
        "RoFormerForMultipleChoice",
        "RoFormerForQuestionAnswering",
        "RoFormerForSequenceClassification",
        "RoFormerForTokenClassification",
        "RoFormerLayer",
        "RoFormerModel",
        "RoFormerPreTrainedModel",
        "load_tf_weights_in_roformer",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
        "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRoFormerForCausalLM",
        "TFRoFormerForMaskedLM",
        "TFRoFormerForMultipleChoice",
        "TFRoFormerForQuestionAnswering",
        "TFRoFormerForSequenceClassification",
        "TFRoFormerForTokenClassification",
        "TFRoFormerLayer",
        "TFRoFormerModel",
        "TFRoFormerPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
        "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxRoFormerForMaskedLM",
        "FlaxRoFormerForMultipleChoice",
        "FlaxRoFormerForQuestionAnswering",
        "FlaxRoFormerForSequenceClassification",
        "FlaxRoFormerForTokenClassification",
        "FlaxRoFormerModel",
        "FlaxRoFormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
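# Note: replacing the module in `sys.modules` with `_LazyModule` defers the
# heavy torch/tf/flax imports until an attribute is first accessed, e.g.:
#
#   from transformers.models.roformer import RoFormerConfig  # no torch import yet
#   from transformers.models.roformer import RoFormerModel   # triggers the torch branch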
| 532 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_nllb_moe": [
        "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "NllbMoeConfig",
    ]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
        "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NllbMoeForConditionalGeneration",
        "NllbMoeModel",
        "NllbMoePreTrainedModel",
        "NllbMoeTop2Router",
        "NllbMoeSparseMLP",
    ]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 719 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()
        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)
        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)
        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)
        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"
        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)
            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")
        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)
        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(do_sample=True, temperature=0.7, length_penalty=1.0)
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(do_sample=True, temperature=0.7, length_penalty=1.0)
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
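# The suites above are usually run through pytest; the test-file path below is
# an assumption, not taken from this file:
#
#   python -m pytest tests/generation/test_configuration_utils.py -k "update"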
| 226 | 0 |
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
    SecondaryLearner,
    collect_objective_set,
    compute_perplexity,
    generate_datasets,
    load_gpt2,
    recopy_gpt2,
    set_seed,
    train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
def generate_n_pairs(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
    """Collect (context, information gain) pairs used to train the secondary learner."""
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim
    )
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(
    secondary_learner_train_data,
    secondary_learner_max_epochs=15,
    secondary_learner_batch_size=128,
    eval_freq=100,
    igf_model_path="igf_model.pt",
):
    """Train the secondary (information-gain) learner on the collected pairs."""
    set_seed(42)
    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)
    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner,
        secondary_learner_train_data,
        max_epochs=secondary_learner_max_epochs,
        batch_size=secondary_learner_batch_size,
        eval_freq=100,
        igf_model_path=igf_model_path,
    )
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner
def finetune(
    model,
    train_dataset,
    test_dataset,
    context_len=32,
    max_steps=1000,
    batch_size=16,
    threshold=1.0,
    recopy_model=recopy_gpt2,
    secondary_learner=None,
    eval_interval=10,
    finetuned_model_name="gpt2_finetuned.pt",
):
    """Fine-tune GPT-2, optionally filtering training contexts with the secondary learner."""
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)
    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)

    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0
    observed_qs = []
    test_perps = []

    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True

            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0)
                )[0].item()
                observed_qs.append(float(predicted_q))

                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False

            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1

            del outputs

            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)
                    print("Test perplexity, step", global_step, ":", real_perp)
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break

    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")

    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True,
        help="The input data dir. Should contain data files for WikiText.",
    )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--data_file", type=str, default=None,
        help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ),
    )
    parser.add_argument(
        "--igf_data_file", type=str, default=None,
        help="A jbl file containing the context and information gain pairs to train secondary learner.",
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True,
        help="The output directory where the final fine-tuned model is stored.",
    )
    parser.add_argument(
        "--tokenizer_name", default=None, type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len", default=32, type=int,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument(
        "--size_objective_set", default=100, type=int,
        help="number of articles that are long enough to be used as our objective set",
    )
    parser.add_argument("--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq")
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size", default=128, type=int,
        help="batch size of training data for secondary learner",
    )
    parser.add_argument("--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) ")
    parser.add_argument(
        "--eval_interval", default=10, type=int,
        help=(
            "decay the selectivity of our secondary learner filter from"
            "1 standard deviation above average to 1 below average after 10 batches"
        ),
    )
    parser.add_argument("--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data")
    parser.add_argument("--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set")
    parser.add_argument("--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner")
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold", default=1.0, type=float,
        help=(
            "The threshold value used by secondary learner to filter the train_data and allow only"
            " informative data as input to the model"
        ),
    )
    parser.add_argument("--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name")
    parser.add_argument(
        "--recopy_model", default=recopy_gpt2, type=str,
        help="Reset the model to the original pretrained GPT-2 weights after each iteration",
    )

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32,
        max_steps=10,
        size_objective_set=100,
        min_len=1026,
        trim=True,
        data_file="data/tokenized_stories_train_wikitext103.jbl",
        igf_data_file="igf_context_pairs.jbl",
    )

    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data,
        secondary_learner_max_epochs=15,
        secondary_learner_batch_size=128,
        eval_freq=100,
        igf_model_path="igf_model.pt",
    )

    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True
    )

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model,
        train_dataset,
        test_dataset,
        context_len=32,
        max_steps=1000,
        batch_size=16,
        threshold=1.0,
        recopy_model=recopy_gpt2,
        secondary_learner=secondary_learner,
        eval_interval=10,
        finetuned_model_name="gpt2_finetuned.pt",
    )
if __name__ == "__main__":
main()
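# Example invocation (the script file name and data paths are placeholders,
# not taken from this file):
#
#   python run_clm_igf.py --data_dir data/ --model_name_or_path gpt2 --output_dir output/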
| 43 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
        "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvBertForMaskedLM",
        "ConvBertForMultipleChoice",
        "ConvBertForQuestionAnswering",
        "ConvBertForSequenceClassification",
        "ConvBertForTokenClassification",
        "ConvBertLayer",
        "ConvBertModel",
        "ConvBertPreTrainedModel",
        "load_tf_weights_in_convbert",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
        "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFConvBertForMaskedLM",
        "TFConvBertForMultipleChoice",
        "TFConvBertForQuestionAnswering",
        "TFConvBertForSequenceClassification",
        "TFConvBertForTokenClassification",
        "TFConvBertLayer",
        "TFConvBertModel",
        "TFConvBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 43 | 1 |
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Return True if digit `n` can be placed at (row, column) without conflicts."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the coordinates of the first empty cell, or None if the grid is full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None
def sudoku(grid: Matrix) -> Matrix | None:
    """Solve the grid in place with backtracking; return it on success, else None."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None
def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 20)
print_solution(example_grid)
print('''\nExample grid solution:''')
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''')
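# Note: `sudoku` mutates the grid in place and returns it on success. The
# backtracking search is exponential in the number of empty cells in the worst
# case, but typical 9x9 puzzles solve almost instantly.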
| 149 |
def permute(nums: list[int]) -> list[list[int]]:
    """Return all permutations of `nums` using recursive rotation."""
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums):
    """Return all permutations of `nums` using in-place swaps and backtracking."""

    def backtrack(start):
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output = []
    backtrack(0)
    return output
if __name__ == "__main__":
import doctest
# use res to print the data in permute2 function
    res = permute2([1, 2, 3])
print(res)
doctest.testmod()
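# Both implementations return all n! orderings (3! = 6 lists for [1, 2, 3]);
# only the emission order differs, e.g.
# sorted(permute([1, 2, 3])) == sorted(permute2([1, 2, 3])).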
| 149 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_thumbnail=True, do_align_axis=False, do_pad=True, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})
    def test_batch_feature(self):
        pass
    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 491 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        raw_story = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)
        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])
        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
| 491 | 1 |
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def add_new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)
class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
        )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)
    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
    def run(self):
warnings.warn(
'''The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '''
'''It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '''
'''checks, you should use `transformers-cli add-new-model-like` instead.''')
if not _has_cookiecutter:
raise ImportError(
'''Model creation dependencies are required to use the `add_new_model` command. Install them by running '''
'''the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n''')
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
raise ValueError(
'''Several directories starting with `cookiecutter-template-` in current working directory. '''
'''Please clean your directory by removing all folders starting with `cookiecutter-template-` or '''
'''change your working directory.''')
        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"
# Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)

            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )
        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]
# Retrieve configuration
        with open(directory + "/configuration.json", "r") as configuration_file:
            configuration = json.load(configuration_file)

        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f"{directory}/configuration.json")

        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax

        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)
# Tests require submodules as they have parent imports
with open(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' , '''w'''):
pass
shutil.move(
F'{directory}/__init__.py' , F'{model_dir}/__init__.py' , )
shutil.move(
F'{directory}/configuration_{lowercase_model_name}.py' , F'{model_dir}/configuration_{lowercase_model_name}.py' , )
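        # Helper: strip `# Copied from transformers.` markers, since the generated copies will diverge from their sources.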
        def remove_copy_lines(path: str):
            with open(path , '''r''') as f:
                lines = f.readlines()
            with open(path , '''w''') as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)
if output_pytorch:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_{lowercase_model_name}.py')
shutil.move(
F'{directory}/modeling_{lowercase_model_name}.py' , F'{model_dir}/modeling_{lowercase_model_name}.py' , )
shutil.move(
F'{directory}/test_modeling_{lowercase_model_name}.py' , F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' , )
else:
os.remove(F'{directory}/modeling_{lowercase_model_name}.py')
os.remove(F'{directory}/test_modeling_{lowercase_model_name}.py')
if output_tensorflow:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_tf_{lowercase_model_name}.py')
shutil.move(
F'{directory}/modeling_tf_{lowercase_model_name}.py' , F'{model_dir}/modeling_tf_{lowercase_model_name}.py' , )
shutil.move(
F'{directory}/test_modeling_tf_{lowercase_model_name}.py' , F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' , )
else:
os.remove(F'{directory}/modeling_tf_{lowercase_model_name}.py')
os.remove(F'{directory}/test_modeling_tf_{lowercase_model_name}.py')
if output_flax:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_flax_{lowercase_model_name}.py')
shutil.move(
F'{directory}/modeling_flax_{lowercase_model_name}.py' , F'{model_dir}/modeling_flax_{lowercase_model_name}.py' , )
shutil.move(
F'{directory}/test_modeling_flax_{lowercase_model_name}.py' , F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' , )
else:
os.remove(F'{directory}/modeling_flax_{lowercase_model_name}.py')
os.remove(F'{directory}/test_modeling_flax_{lowercase_model_name}.py')
shutil.move(
F'{directory}/{lowercase_model_name}.md' , F'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' , )
shutil.move(
F'{directory}/tokenization_{lowercase_model_name}.py' , F'{model_dir}/tokenization_{lowercase_model_name}.py' , )
shutil.move(
F'{directory}/tokenization_fast_{lowercase_model_name}.py' , F'{model_dir}/tokenization_{lowercase_model_name}_fast.py' , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
        def replace(original_file: str , line_to_copy_below: str , lines_to_copy: list):
            # Create temp file
            fh , abs_path = mkstemp()
            line_found = False
            with fdopen(fh , '''w''') as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)
            if not line_found:
                raise ValueError(F'Line {line_to_copy_below} was not found in file.')
            # Copy the file permissions from the old file to the new file
            copymode(original_file , abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path , original_file)
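        # Decide whether a generated snippet should be skipped, based on the frameworks selected above.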
        def skip_units(line):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
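        # The `to_replace_*.py` file is a small marker format: `# To replace in:` names a target file,
        # `# Below:` names the anchor line, the uncommented lines that follow form the snippet,
        # and `# End.` triggers the actual replacement.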
        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('''"''')[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('''"''')[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in , line_to_copy_below , lines_to_copy)
                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)
            remove(path_to_datafile)
replace_in_files(F'{directory}/to_replace_{lowercase_model_name}.py')
        os.rmdir(directory)
| 60 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class _A ( TestCase ):
    def _create_example_records( self ):
'''simple docstring'''
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
    def _create_example_dict( self ):
'''simple docstring'''
        data = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']}
        return Dataset.from_dict(data)
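    # The tests below exercise Dataset.from_list: record content, parity with from_dict,
    # missing columns, and type inference across records.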
    def test_create( self ):
'''simple docstring'''
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names , ['''col_1''', '''col_2'''])
        for i, r in enumerate(dset):
            self.assertDictEqual(r , example_records[i])
    def test_list_dict_equivalent( self ):
'''simple docstring'''
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
self.assertEqual(dset.info , dset_from_dict.info)
    def test_uneven_records( self ): # checks what happens with missing columns
'''simple docstring'''
        records = [{'''col_1''': 1}, {'''col_2''': '''x'''}]
        dset = Dataset.from_list(records)
self.assertDictEqual(dset[0] , {'''col_1''': 1})
self.assertDictEqual(dset[1] , {'''col_1''': None}) # NB: first record is used for columns
    def test_variable_list_records( self ): # checks if the type can be inferred from the second record
'''simple docstring'''
        records = [{'''col_1''': []}, {'''col_1''': [1, 2]}]
        dset = Dataset.from_list(records)
self.assertEqual(dset.info.features['''col_1'''] , Sequence(Value('''int64''')))
    def test_create_empty( self ):
'''simple docstring'''
        dset = Dataset.from_list([])
        self.assertEqual(len(dset) , 0)
self.assertListEqual(dset.column_names , [])
| 60 | 1 |
def binary_xor ( a: int , b: int ) -> str:
    """Return the bitwise XOR of two non-negative integers as a binary string.

    >>> binary_xor(25, 32)
    '0b111001'
    """
    if a < 0 or b < 0:
        raise ValueError('''the value of both inputs must be positive''' )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a != char_b ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 202 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class FlaxUNetaDConditionOutput ( BaseOutput ):
    sample: jnp.ndarray
@flax_register_to_config
class FlaxUNetaDConditionModel ( nn.Module , FlaxModelMixin , ConfigMixin ):
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False
    def init_weights( self , rng ):
        """Initialize the parameters by tracing the module with dummy inputs."""
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape , dtype=jnp.float32 )
        timesteps = jnp.ones((1,) , dtype=jnp.int32 )
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.float32 )
        params_rng , dropout_rng = jax.random.split(rng )
        rngs = {'''params''': params_rng, '''dropout''': dropout_rng}
        return self.init(rngs , sample , timesteps , encoder_hidden_states )["params"]
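    # Flax invokes setup() lazily the first time the module is bound, so all sub-modules are built there.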
    def setup( self ):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4
        if self.num_attention_heads is not None:
            raise ValueError(
                '''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' )
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim
        # input
        self.conv_in = nn.Conv(
            block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim , dtype=self.dtype )
        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention , bool ):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types )
        if isinstance(num_attention_heads , int ):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types )
        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types ):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels ) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlockaD(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            else:
                down_block = FlaxDownBlockaD(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
            down_blocks.append(down_block )
        self.down_blocks = down_blocks
        # mid
        self.mid_block = FlaxUNetMidBlockaDCrossAttn(
            in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels ) )
        reversed_num_attention_heads = list(reversed(num_attention_heads ) )
        only_cross_attention = list(reversed(only_cross_attention ) )
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types ):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1 , len(block_out_channels ) - 1 )]
            is_final_block = i == len(block_out_channels ) - 1
            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlockaD(
                    in_channels=input_channel , out_channels=output_channel , prev_output_channel=prev_output_channel , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            else:
                up_block = FlaxUpBlockaD(
                    in_channels=input_channel , out_channels=output_channel , prev_output_channel=prev_output_channel , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
            up_blocks.append(up_block )
            prev_output_channel = output_channel
        self.up_blocks = up_blocks
        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
        self.conv_out = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
    def __call__( self , sample , timesteps , encoder_hidden_states , down_block_additional_residuals=None , mid_block_additional_residual=None , return_dict = True , train = False , ):
        """Run the UNet and return the predicted noise residual."""
        # 1. time
        if not isinstance(timesteps , jnp.ndarray ):
            timesteps = jnp.array([timesteps] , dtype=jnp.int32 )
        elif isinstance(timesteps , jnp.ndarray ) and len(timesteps.shape ) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32 )
            timesteps = jnp.expand_dims(timesteps , 0 )
        t_emb = self.time_proj(timesteps )
        t_emb = self.time_embedding(t_emb )
        # 2. pre-process
        sample = jnp.transpose(sample , (0, 2, 3, 1) )
        sample = self.conv_in(sample )
        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block , FlaxCrossAttnDownBlockaD ):
                sample , res_samples = down_block(sample , t_emb , encoder_hidden_states , deterministic=not train )
            else:
                sample , res_samples = down_block(sample , t_emb , deterministic=not train )
            down_block_res_samples += res_samples
        # ControlNet-style residuals are added onto the skip connections here.
        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()
            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples , down_block_additional_residuals ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)
            down_block_res_samples = new_down_block_res_samples
        # 4. mid
        sample = self.mid_block(sample , t_emb , encoder_hidden_states , deterministic=not train )
        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual
        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block , FlaxCrossAttnUpBlockaD ):
                sample = up_block(
                    sample , temb=t_emb , encoder_hidden_states=encoder_hidden_states , res_hidden_states_tuple=res_samples , deterministic=not train , )
            else:
                sample = up_block(sample , temb=t_emb , res_hidden_states_tuple=res_samples , deterministic=not train )
        # 6. post-process
        sample = self.conv_norm_out(sample )
        sample = nn.silu(sample )
        sample = self.conv_out(sample )
        sample = jnp.transpose(sample , (0, 3, 1, 2) )
        if not return_dict:
            return (sample,)
        return FlaxUNetaDConditionOutput(sample=sample )
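# A minimal usage sketch (hypothetical shapes; assumes the flax block imports above resolve):
#   model = FlaxUNetaDConditionModel(sample_size=32)
#   params = model.init_weights(jax.random.PRNGKey(0))
#   out = model.apply({"params": params}, sample, timesteps, encoder_hidden_states)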
| 202 | 1 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve :
    """Bezier curve computed from a list of 2D control points via Bernstein basis polynomials."""
    def __init__( self , list_of_points: list[tuple[float, float]] ):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points ) - 1
    def basis_function( self , t: float ):
assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points ) ):
            # basis function for each i
            output_values.append(
                comb(self.degree , i ) * ((1 - t) ** (self.degree - i)) * (t**i) )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values ) , 5 ) == 1
        return output_values
    def bezier_curve_function( self , t: float ):
assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t )
        x = 0.0
        y = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
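    # Samples the curve at `step_size` increments and renders it alongside its control points.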
    def plot_curve( self , step_size: float = 0.01 ):
from matplotlib import pyplot as plt # type: ignore
        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t )
            to_plot_x.append(value[0] )
            to_plot_y.append(value[1] )
            t += step_size
        x_points = [i[0] for i in self.list_of_points]
        y_points = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x , to_plot_y , color="""blue""" , label="""Curve of Degree """ + str(self.degree ) , )
        plt.scatter(x_points , y_points , color="""red""" , label="""Control Points""" )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 127 |
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions: List of predicted texts.\n    references: List of reference texts.\n    regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n        ignore when calculating the exact matches. Note: these regexes are removed\n        from the input data before the changes based on the options below (e.g. ignore_case,\n        ignore_punctuation, ignore_numbers) are applied.\n    ignore_case: Boolean, defaults to False. If true, turns everything\n        to lowercase so that capitalization differences are ignored.\n    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\n    ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\nReturns:\n    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    25.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    50.0\n\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    75.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n    >>> print(round(results["exact_match"], 1))\n    100.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    33.3\n\n'
_CITATION = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__ (datasets.Metric ):
"""simple docstring"""
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , reference_urls=[] , )
    def _compute( self , predictions , references , regexes_to_ignore=None , ignore_case=False , ignore_punctuation=False , ignore_numbers=False , ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s , """""" , x ) for x in predictions] )
                references = np.array([re.sub(s , """""" , x ) for x in references] )
        else:
            predictions = np.asarray(predictions )
            references = np.asarray(references )
        if ignore_case:
            predictions = np.char.lower(predictions )
            references = np.char.lower(references )
        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("""""" , """""" , string.punctuation )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        if ignore_numbers:
            repl_table = string.digits.maketrans("""""" , """""" , string.digits )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        # element-wise comparison; the mean of the boolean array times 100 is the exact-match percentage
        score_list = predictions == references
        return {"exact_match": np.mean(score_list ) * 1_0_0}
| 127 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_altclip''': [
'''ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AltCLIPConfig''',
'''AltCLIPTextConfig''',
'''AltCLIPVisionConfig''',
],
'''processing_altclip''': ['''AltCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_altclip'''] = [
'''ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AltCLIPPreTrainedModel''',
'''AltCLIPModel''',
'''AltCLIPTextModel''',
'''AltCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
a : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 555 |
"""simple docstring"""
def count_set_bits( number: int ) -> int:
    """Count the set bits (1s) of `number` with Brian Kernighan's algorithm.

    >>> count_set_bits(25)
    3
    """
    if not isinstance(number , int ) or number < 0:
        raise ValueError("""Input must be a non-negative integer""" )
    count = 0
while number:
# This way we arrive at next set bit (next 1) instead of looping
# through each bit and checking for 1s hence the
# loop won't run 32 times it will only run the number of `1` times
number &= number - 1
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 555 | 1 |
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class lowercase ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors
    def setUp( self ):
super().setUp()
        vocab_tokens = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
        vocab = dict(zip(vocab_tokens , range(len(vocab_tokens ) ) ) )
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(merges ) )
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
return "lower newer", "lower newer"
@cached_property
    def default_tokenizer( self ):
return MvpTokenizer.from_pretrained('RUCAIBox/mvp' )
@cached_property
    def default_tokenizer_fast( self ):
return MvpTokenizerFast.from_pretrained('RUCAIBox/mvp' )
@require_torch
    def test_prepare_batch( self ):
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        expected_src_tokens = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text , max_length=len(expected_src_tokens ) , padding=True , return_tensors='pt' )
            self.assertIsInstance(batch , BatchEncoding )
            self.assertEqual((2, 9) , batch.input_ids.shape )
            self.assertEqual((2, 9) , batch.attention_mask.shape )
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens , result )
# Test that special tokens are reset
@require_torch
def a ( self ):
snake_case_ = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
snake_case_ = tokenizer(snake_case , padding=snake_case , return_tensors='pt' )
# check if input_ids are returned and no labels
self.assertIn('input_ids' , snake_case )
self.assertIn('attention_mask' , snake_case )
self.assertNotIn('labels' , snake_case )
self.assertNotIn('decoder_attention_mask' , snake_case )
@require_torch
    def test_tokenizer_as_target_length( self ):
        tgt_text = [
            'Summary of the text.',
            'Another summary.',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text , max_length=32 , padding='max_length' , return_tensors='pt' )
            self.assertEqual(32 , targets['input_ids'].shape[1] )
@require_torch
def a ( self ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
snake_case_ = tokenizer(
['I am a small frog' * 1024, 'I am a small frog'] , padding=snake_case , truncation=snake_case , return_tensors='pt' )
self.assertIsInstance(snake_case , snake_case )
self.assertEqual(batch.input_ids.shape , (2, 1024) )
@require_torch
def a ( self ):
snake_case_ = ['A long paragraph for summarization.']
snake_case_ = [
'Summary of the text.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
snake_case_ = tokenizer(snake_case , text_target=snake_case , return_tensors='pt' )
snake_case_ = inputs['input_ids']
snake_case_ = inputs['labels']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def a ( self ):
pass
def a ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
snake_case_ = self.rust_tokenizer_class.from_pretrained(snake_case , **snake_case )
snake_case_ = self.tokenizer_class.from_pretrained(snake_case , **snake_case )
snake_case_ = 'A, <mask> AllenNLP sentence.'
snake_case_ = tokenizer_r.encode_plus(snake_case , add_special_tokens=snake_case , return_token_type_ids=snake_case )
snake_case_ = tokenizer_p.encode_plus(snake_case , add_special_tokens=snake_case , return_token_type_ids=snake_case )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
snake_case_ = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
snake_case_ = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
snake_case , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
snake_case , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
| 108 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)
@dataclass
class IFPipelineOutput ( BaseOutput ):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]
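# Import the real pipelines only when both transformers and torch are available;
# otherwise the dummy objects raise a helpful error at instantiation time.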
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 108 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : int = ["""sentencepiece"""]
def __init__( self : str , *a_ : Any , **a_ : List[str] ):
requires_backends(self , ["sentencepiece"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : Union[str, Any] = ["""sentencepiece"""]
def __init__( self : Optional[int] , *a_ : Optional[int] , **a_ : Any ):
requires_backends(self , ["sentencepiece"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : List[str] = ["""sentencepiece"""]
def __init__( self : Union[str, Any] , *a_ : List[Any] , **a_ : Optional[int] ):
requires_backends(self , ["sentencepiece"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : Tuple = ["""sentencepiece"""]
def __init__( self : Optional[int] , *a_ : Any , **a_ : List[str] ):
requires_backends(self , ["sentencepiece"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : Any = ["""sentencepiece"""]
def __init__( self : Union[str, Any] , *a_ : Union[str, Any] , **a_ : str ):
requires_backends(self , ["sentencepiece"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : str = ["""sentencepiece"""]
def __init__( self : str , *a_ : Union[str, Any] , **a_ : Union[str, Any] ):
requires_backends(self , ["sentencepiece"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : List[Any] = ["""sentencepiece"""]
def __init__( self : Optional[Any] , *a_ : List[Any] , **a_ : Tuple ):
requires_backends(self , ["sentencepiece"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : Any = ["""sentencepiece"""]
def __init__( self : List[Any] , *a_ : Any , **a_ : Tuple ):
requires_backends(self , ["sentencepiece"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : Optional[int] = ["""sentencepiece"""]
def __init__( self : List[Any] , *a_ : Dict , **a_ : List[Any] ):
requires_backends(self , ["sentencepiece"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : str = ["""sentencepiece"""]
def __init__( self : List[str] , *a_ : str , **a_ : Any ):
requires_backends(self , ["sentencepiece"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : Union[str, Any] = ["""sentencepiece"""]
def __init__( self : List[str] , *a_ : Optional[int] , **a_ : int ):
requires_backends(self , ["sentencepiece"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : int = ["""sentencepiece"""]
def __init__( self : Any , *a_ : Any , **a_ : Any ):
requires_backends(self , ["sentencepiece"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : List[str] = ["""sentencepiece"""]
def __init__( self : Tuple , *a_ : List[str] , **a_ : str ):
requires_backends(self , ["sentencepiece"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : List[Any] = ["""sentencepiece"""]
def __init__( self : Optional[Any] , *a_ : Union[str, Any] , **a_ : int ):
requires_backends(self , ["sentencepiece"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : str = ["""sentencepiece"""]
def __init__( self : List[str] , *a_ : Dict , **a_ : List[str] ):
requires_backends(self , ["sentencepiece"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : int = ["""sentencepiece"""]
def __init__( self : int , *a_ : int , **a_ : List[str] ):
requires_backends(self , ["sentencepiece"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : Any = ["""sentencepiece"""]
def __init__( self : Tuple , *a_ : Any , **a_ : Optional[int] ):
requires_backends(self , ["sentencepiece"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : List[Any] = ["""sentencepiece"""]
def __init__( self : Dict , *a_ : List[Any] , **a_ : int ):
requires_backends(self , ["sentencepiece"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : List[str] = ["""sentencepiece"""]
def __init__( self : List[str] , *a_ : Union[str, Any] , **a_ : Dict ):
requires_backends(self , ["sentencepiece"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : str = ["""sentencepiece"""]
def __init__( self : Tuple , *a_ : Any , **a_ : Dict ):
requires_backends(self , ["sentencepiece"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : List[Any] = ["""sentencepiece"""]
def __init__( self : Tuple , *a_ : Optional[int] , **a_ : Any ):
requires_backends(self , ["sentencepiece"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : Tuple = ["""sentencepiece"""]
def __init__( self : Union[str, Any] , *a_ : List[Any] , **a_ : Any ):
requires_backends(self , ["sentencepiece"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : Union[str, Any] = ["""sentencepiece"""]
def __init__( self : Any , *a_ : int , **a_ : Optional[Any] ):
requires_backends(self , ["sentencepiece"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : Optional[Any] = ["""sentencepiece"""]
def __init__( self : str , *a_ : str , **a_ : List[str] ):
requires_backends(self , ["sentencepiece"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : List[Any] = ["""sentencepiece"""]
def __init__( self : str , *a_ : Optional[int] , **a_ : Optional[Any] ):
requires_backends(self , ["sentencepiece"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : Tuple = ["""sentencepiece"""]
def __init__( self : Tuple , *a_ : Tuple , **a_ : Optional[int] ):
requires_backends(self , ["sentencepiece"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : List[str] = ["""sentencepiece"""]
def __init__( self : Optional[Any] , *a_ : Union[str, Any] , **a_ : List[Any] ):
requires_backends(self , ["sentencepiece"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : str = ["""sentencepiece"""]
def __init__( self : str , *a_ : Tuple , **a_ : Tuple ):
requires_backends(self , ["sentencepiece"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : Tuple = ["""sentencepiece"""]
def __init__( self : Tuple , *a_ : int , **a_ : List[Any] ):
requires_backends(self , ["sentencepiece"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : Union[str, Any] = ["""sentencepiece"""]
def __init__( self : Optional[Any] , *a_ : int , **a_ : Any ):
requires_backends(self , ["sentencepiece"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : Tuple = ["""sentencepiece"""]
def __init__( self : Union[str, Any] , *a_ : int , **a_ : int ):
requires_backends(self , ["sentencepiece"] )
| 610 |
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCamelCase ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = GPTaTokenizer
    rust_tokenizer_class = GPTaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"""add_prefix_space""": True}
    test_seqaseq = False
    def setUp( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab_tokens = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
        vocab = dict(zip(vocab_tokens , range(len(vocab_tokens ) ) ) )
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(merges ) )
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return GPTaTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text , add_prefix_space=True )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
def lowerCamelCase ( self : str ):
if not self.test_rust_tokenizer:
return
lowerCAmelCase_ : int = self.get_tokenizer()
lowerCAmelCase_ : Optional[Any] = self.get_rust_tokenizer(add_prefix_space=a_ )
lowerCAmelCase_ : int = "lower newer"
# Testing tokenization
lowerCAmelCase_ : Optional[int] = tokenizer.tokenize(a_ , add_prefix_space=a_ )
lowerCAmelCase_ : int = rust_tokenizer.tokenize(a_ )
self.assertListEqual(a_ , a_ )
# Testing conversion to ids without special tokens
lowerCAmelCase_ : List[str] = tokenizer.encode(a_ , add_special_tokens=a_ , add_prefix_space=a_ )
lowerCAmelCase_ : List[str] = rust_tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertListEqual(a_ , a_ )
# Testing conversion to ids with special tokens
lowerCAmelCase_ : Any = self.get_rust_tokenizer(add_prefix_space=a_ )
lowerCAmelCase_ : List[Any] = tokenizer.encode(a_ , add_prefix_space=a_ )
lowerCAmelCase_ : str = rust_tokenizer.encode(a_ )
self.assertListEqual(a_ , a_ )
# Testing the unknown token
lowerCAmelCase_ : List[str] = tokens + [rust_tokenizer.unk_token]
lowerCAmelCase_ : Dict = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(a_ ) , a_ )
def lowerCamelCase ( self : Dict , *a_ : Optional[int] , **a_ : Union[str, Any] ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def lowerCamelCase ( self : Optional[int] , a_ : List[Any]=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCAmelCase_ : str = self.rust_tokenizer_class.from_pretrained(a_ , **a_ )
# Simple input
lowerCAmelCase_ : Optional[int] = "This is a simple input"
lowerCAmelCase_ : List[Any] = ["This is a simple input 1", "This is a simple input 2"]
lowerCAmelCase_ : List[str] = ("This is a simple input", "This is a pair")
lowerCAmelCase_ : List[Any] = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(a_ , tokenizer_r.encode , a_ , max_length=a_ , padding="max_length" )
# Simple input
self.assertRaises(a_ , tokenizer_r.encode_plus , a_ , max_length=a_ , padding="max_length" )
# Simple input
self.assertRaises(
a_ , tokenizer_r.batch_encode_plus , a_ , max_length=a_ , padding="max_length" , )
# Pair input
self.assertRaises(a_ , tokenizer_r.encode , a_ , max_length=a_ , padding="max_length" )
# Pair input
self.assertRaises(a_ , tokenizer_r.encode_plus , a_ , max_length=a_ , padding="max_length" )
# Pair input
self.assertRaises(
a_ , tokenizer_r.batch_encode_plus , a_ , max_length=a_ , padding="max_length" , )
def lowerCamelCase ( self : Optional[int] ):
lowerCAmelCase_ : Optional[int] = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" )
# Simple input
lowerCAmelCase_ : Tuple = "This is a simple input"
lowerCAmelCase_ : Any = ["This is a simple input looooooooong", "This is a simple input"]
lowerCAmelCase_ : Union[str, Any] = ("This is a simple input", "This is a pair")
lowerCAmelCase_ : Optional[Any] = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
lowerCAmelCase_ : Dict = tokenizer.pad_token_id
lowerCAmelCase_ : Union[str, Any] = tokenizer(a_ , padding="max_length" , max_length=30 , return_tensors="np" )
lowerCAmelCase_ : Tuple = tokenizer(a_ , padding=a_ , truncate=a_ , return_tensors="np" )
lowerCAmelCase_ : Dict = tokenizer(*a_ , padding="max_length" , max_length=60 , return_tensors="np" )
lowerCAmelCase_ : Union[str, Any] = tokenizer(a_ , padding=a_ , truncate=a_ , return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def lowerCamelCase ( self : List[Any] ):
lowerCAmelCase_ : Tuple = "$$$"
lowerCAmelCase_ : Optional[Any] = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=a_ , add_bos_token=a_ )
lowerCAmelCase_ : Tuple = "This is a simple input"
lowerCAmelCase_ : Optional[int] = ["This is a simple input 1", "This is a simple input 2"]
lowerCAmelCase_ : Optional[Any] = tokenizer.bos_token_id
lowerCAmelCase_ : int = tokenizer(a_ )
lowerCAmelCase_ : Optional[Any] = tokenizer(a_ )
self.assertEqual(out_s.input_ids[0] , a_ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
lowerCAmelCase_ : int = tokenizer.decode(out_s.input_ids )
lowerCAmelCase_ : List[Any] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , a_ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def lowerCamelCase ( self : List[str] ):
pass
def lowerCamelCase ( self : List[Any] ):
# TODO: change to self.get_tokenizers() when the fast version is implemented
lowerCAmelCase_ : int = [self.get_tokenizer(do_lower_case=a_ , add_bos_token=a_ )]
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
lowerCAmelCase_ : Optional[Any] = "Encode this."
lowerCAmelCase_ : List[str] = "This one too please."
lowerCAmelCase_ : Tuple = tokenizer.encode(a_ , add_special_tokens=a_ )
encoded_sequence += tokenizer.encode(a_ , add_special_tokens=a_ )
lowerCAmelCase_ : Dict = tokenizer.encode_plus(
a_ , a_ , add_special_tokens=a_ , return_special_tokens_mask=a_ , )
lowerCAmelCase_ : List[str] = encoded_sequence_dict["input_ids"]
lowerCAmelCase_ : Optional[Any] = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(a_ ) , len(a_ ) )
lowerCAmelCase_ : str = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(a_ )
]
lowerCAmelCase_ : List[Any] = [x for x in filtered_sequence if x is not None]
self.assertEqual(a_ , a_ )
@require_tokenizers
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase ( self : str ):
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
lowerCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=a_ )
lowerCAmelCase_ : List[Any] = "A photo of a cat"
lowerCAmelCase_ : Union[str, Any] = tokenizer.encode(
a_ , )
self.assertEqual(a_ , [2, 2_50, 13_45, 9, 10, 47_58] )
tokenizer.save_pretrained("test_opt" )
lowerCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained("./test_opt" )
lowerCAmelCase_ : Optional[int] = tokenizer.encode(
a_ , )
self.assertEqual(a_ , [2, 2_50, 13_45, 9, 10, 47_58] )
def lowerCamelCase ( self : int ):
lowerCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=a_ )
lowerCAmelCase_ : Any = "A photo of a cat"
lowerCAmelCase_ : List[str] = tokenizer.encode(
a_ , )
# Same as above
self.assertEqual(a_ , [2, 2_50, 13_45, 9, 10, 47_58] )
@unittest.skip("This test is failing because of a bug in the fast tokenizer" )
def lowerCamelCase ( self : List[str] ):
lowerCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=a_ )
lowerCAmelCase_ : Tuple = "bos"
lowerCAmelCase_ : Dict = tokenizer.get_vocab()["bos"]
lowerCAmelCase_ : List[Any] = "A photo of a cat"
lowerCAmelCase_ : Optional[Any] = tokenizer.encode(
a_ , )
# We changed the bos token
self.assertEqual(a_ , [3_19_57, 2_50, 13_45, 9, 10, 47_58] )
tokenizer.save_pretrained("./tok" )
lowerCAmelCase_ : str = AutoTokenizer.from_pretrained("./tok" )
self.assertTrue(tokenizer.is_fast )
lowerCAmelCase_ : int = tokenizer.encode(
a_ , )
self.assertEqual(a_ , [3_19_57, 2_50, 13_45, 9, 10, 47_58] )
| 610 | 1 |
UpperCamelCase__ = '''Alexander Joslin'''
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm( equation: str ) -> int:
    """Evaluate a fully parenthesized infix expression with an operand stack and an operator stack."""
    operators = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
for i in equation:
if i.isdigit():
# RULE 1
            operand_stack.push(int(i ) )
elif i in operators:
# RULE 2
            operator_stack.push(i )
elif i == ")":
# RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num_a = operand_stack.peek()
            operand_stack.pop()
            num_b = operand_stack.peek()
            operand_stack.pop()
            # num_b was pushed first, so it is the left operand
            total = operators[opr](num_b , num_a )
            operand_stack.push(total )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
    equation = '''(5 + ((4 * 2) * (2 + 3)))'''
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 143 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''google/pegasus-xsum''': 512,
}
logger = logging.get_logger(__name__)
class __lowercase ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , pad_token="<pad>" , eos_token="</s>" , unk_token="<unk>" , mask_token="<mask_2>" , mask_token_sent="<mask_1>" , additional_special_tokens=None , offset=1_0_3 , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens , list ):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list )}, but is"
                    f" {type(additional_special_tokens )}" )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended ) , self.offset - 1 )
            ]
            if len(set(additional_special_tokens_extended ) ) != len(additional_special_tokens_extended ):
                raise ValueError(
                    '''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}." )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2 , self.offset )]
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token , unk_token=unk_token , mask_token=mask_token , pad_token=pad_token , mask_token_sent=mask_token_sent , offset=offset , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }
        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                } )
        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}
@property
    def vocab_size( self ) -> int:
        return len(self.sp_model ) + self.offset
    def get_vocab( self ) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
def __getstate__( self : str ):
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
def __setstate__( self : Tuple , lowercase__ : str ):
a_ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
a_ = {}
a_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
    def _tokenize( self , text: str ):
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token: str ):
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token )
        return sp_id + self.offset
    def _convert_id_to_token( self , index: int ):
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset )
        return token
def __magic_name__ ( self : Optional[int] , lowercase__ : List[str] ):
a_ = []
a_ = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowercase__ ) + token
a_ = []
else:
current_sub_tokens.append(lowercase__ )
out_string += self.sp_model.decode(lowercase__ )
return out_string.strip()
    def num_special_tokens_to_add( self : Tuple , pair : Optional[int]=False ):
        return 1
    def _special_token_mask( self : Any , seq : Any ):
        all_special_ids = set(self.all_special_ids )  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id )  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask( self : Union[str, Any] , token_ids_a : List , token_ids_b : Optional[List] = None , already_has_special_tokens : bool = False ):
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_a )
        elif token_ids_b is None:
            return self._special_token_mask(token_ids_a ) + [1]
        else:
            return self._special_token_mask(token_ids_a + token_ids_b ) + [1]
    def build_inputs_with_special_tokens( self : Union[str, Any] , token_ids_a : Any , token_ids_b : Optional[Any]=None ):
        if token_ids_b is None:
            return token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_a + token_ids_b + [self.eos_token_id]
    def save_vocabulary( self : Union[str, Any] , save_directory : str , filename_prefix : Optional[str] = None ):
        if not os.path.isdir(save_directory ):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
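# Illustration of the offset scheme above (a hypothetical sketch, assuming a trained
# SentencePiece model on disk whose piece id for "▁the" happens to be 5): with
# offset=103, ids 0-1 are pad/eos, 2-3 are the two mask tokens, 4-104 are the
# <unk_x> fillers, and every SentencePiece id is shifted up by the offset, so
#   _convert_token_to_id("▁the") == sp_model.piece_to_id("▁the") + 103 == 108
#   _convert_id_to_token(108)    == sp_model.IdToPiece(108 - 103) == "▁the"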
| 143 | 1 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char( cp : Tuple ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
    # like all of the other languages.
if (
(cp >= 0x4_E00 and cp <= 0x9_FFF)
or (cp >= 0x3_400 and cp <= 0x4_DBF) #
or (cp >= 0x20_000 and cp <= 0x2A_6DF) #
or (cp >= 0x2A_700 and cp <= 0x2B_73F) #
or (cp >= 0x2B_740 and cp <= 0x2B_81F) #
or (cp >= 0x2B_820 and cp <= 0x2C_EAF) #
or (cp >= 0xF_900 and cp <= 0xF_AFF)
or (cp >= 0x2F_800 and cp <= 0x2F_A1F) #
): #
return True
return False
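# A quick sanity check of the block ranges above: ord("中") == 0x4E2D lies in the
# 0x4E00-0x9FFF CJK Unified Ideographs block, so _is_chinese_char(0x4E2D) is True,
# while ord("a") == 0x61 falls outside every listed range and returns False.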
def is_chinese( word : str ):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char )
        if not _is_chinese_char(cp ):
            return 0
    return 1
def get_chinese_word( tokens : List[str] ):
    word_set = set()
    for token in tokens:
        chinese_word = len(token ) > 1 and is_chinese(token )
        if chinese_word:
            word_set.add(token )
    word_list = list(word_set )
    return word_list
def add_sub_symbol( bert_tokens : List[str] , chinese_word_set : set ):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w ) for w in chinese_word_set] )
    bert_word = bert_tokens
    start , end = 0, len(bert_word )
    while start < end:
        single_word = True
        if is_chinese(bert_word[start] ):
            max_try = min(end - start , max_word_len )
            for i in range(max_try , 1 , -1 ):
                whole_word = "".join(bert_word[start : start + i] )
                if whole_word in chinese_word_set:
                    for j in range(start + 1 , start + i ):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
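# Worked example for add_sub_symbol (illustrative): with bert_tokens
# ["[CLS]", "中", "国", "人", "[SEP]"] and chinese_word_set {"中国"}, the window
# starting at "中" matches the whole word "中国", so "国" is rewritten to "##国"
# and the result is ["[CLS]", "中", "##国", "人", "[SEP]"].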
def prepare_ref( lines : List[str] , ltp_tokenizer : LTP , bert_tokenizer : BertTokenizer ):
    ltp_res = []
    for i in range(0 , len(lines ) , 100 ):
        res = ltp_tokenizer.seg(lines[i : i + 100] )[0]
        res = [get_chinese_word(r ) for r in res]
        ltp_res.extend(res )
    assert len(ltp_res ) == len(lines )
    bert_res = []
    for i in range(0 , len(lines ) , 100 ):
        res = bert_tokenizer(lines[i : i + 100] , add_special_tokens=True , truncation=True , max_length=512 )
        bert_res.extend(res["input_ids"] )
    assert len(bert_res ) == len(lines )
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res , ltp_res ):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id )
            input_tokens.append(token )
        input_tokens = add_sub_symbol(input_tokens , chinese_word )
        ref_id = []
        # We only save the positions of Chinese subwords that start with ##, which means they are part of a whole word.
        for i, token in enumerate(input_tokens ):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token ) == 1 and _is_chinese_char(ord(clean_token ) ):
                    ref_id.append(i )
        ref_ids.append(ref_id )
    assert len(ref_ids ) == len(bert_res )
    return ref_ids
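# The returned structure is one list of indices per input line, marking the
# wordpieces that whole-word masking should tie to their predecessor. For the
# (hypothetical) tokenization ["[CLS]", "中", "##国", "人", "[SEP]"] the ref list
# would be [2], since only "##国" continues a Chinese whole word.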
def main( args : List[str] ):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these models, we have to use the same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name , "r" , encoding="utf-8" ) as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line ) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp )  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert )
    ref_ids = prepare_ref(data , ltp_tokenizer , bert_tokenizer )
    with open(args.save_path , "w" , encoding="utf-8" ) as f:
        data = [json.dumps(ref ) + "\n" for ref in ref_ids]
        f.writelines(data )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
)
parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")
    args = parser.parse_args()
main(args)
| 134 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key( name : List[Any] ):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed" , "vision_model.embeddings.position_embeddings" )
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj" , "vision_model.embeddings.patch_embeddings.projection" )
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm" , "vision_model.embeddings.layernorm" )
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers" , "vision_model.encoder.stages" )
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks" , "layers" )
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn" , "self_attn" )
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj" , "out_proj" )
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj" , "pre_assign_attn.attn.out_proj" )
    if "norm1" in name:
        name = name.replace("norm1" , "layer_norm1" )
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2" , "layer_norm2" )
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm" , "vision_model.layernorm" )
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding" , "text_model.embeddings.token_embedding" )
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding" , "text_model.embeddings.position_embedding.weight" )
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks." , "text_model.encoder.layers." )
    if "ln_1" in name:
        name = name.replace("ln_1" , "layer_norm1" )
    if "ln_2" in name:
        name = name.replace("ln_2" , "layer_norm2" )
    if "c_fc" in name:
        name = name.replace("c_fc" , "fc1" )
    if "c_proj" in name:
        name = name.replace("c_proj" , "fc2" )
    if "text_encoder" in name:
        name = name.replace("text_encoder" , "text_model" )
    if "ln_final" in name:
        name = name.replace("ln_final" , "final_layer_norm" )
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden." , "visual_projection." )
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out." , "visual_projection.3." )
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden" , "text_projection" )
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out" , "text_projection.3" )
    return name
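# Example of the renaming chain above (illustrative): the original checkpoint key
# "img_encoder.layers.0.blocks.0.norm1.weight" passes through the "img_encoder.layers",
# "blocks" and "norm1" rules and comes out as
# "vision_model.encoder.stages.0.layers.0.layer_norm1.weight".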
def convert_state_dict( orig_state_dict : Optional[int] , config : List[str] ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split("." )
            stage_num , layer_num = int(key_split[2] ), int(key_split[4] )
            dim = config.vision_config.hidden_size
            # NOTE: the upstream script writes these slices back into orig_state_dict under
            # the model's query/key/value parameter names for stage `stage_num`, layer
            # `layer_num`; the exact destination keys were elided in this copy.
            if "weight" in key:
                q_weight = val[:dim, :]
                k_weight = val[dim : dim * 2, :]
                v_weight = val[-dim:, :]
            else:
                q_bias = val[:dim]
                k_bias = val[dim : dim * 2]
                v_bias = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split("." )
            layer_num = int(key_split[3] )
            dim = config.text_config.hidden_size
            if "weight" in key:
                q_weight = val[:dim, :]
                k_weight = val[
                    dim : dim * 2, :
                ]
                v_weight = val[-dim:, :]
            else:
                q_bias = val[:dim]
                k_bias = val[dim : dim * 2]
                v_bias = val[-dim:]
        else:
            new_name = rename_key(key )
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val
    return orig_state_dict
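# Shape intuition for the qkv split above: a fused projection stores the query, key
# and value weights stacked along dim 0, so for hidden_size d the fused weight has
# shape (3d, d) and val[:d, :], val[d : 2d, :] and val[-d:, :] recover the three
# (d, d) matrices (the fused (3d,) bias splits the same way).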
def prepare_img( ):
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_groupvit_checkpoint( checkpoint_path : str , pytorch_dump_folder_path : Any , model_name : Optional[Any]="groupvit-gcc-yfcc" , push_to_hub : Any=False ):
    config = GroupViTConfig()
    model = GroupViTModel(config ).eval()
    state_dict = torch.load(checkpoint_path , map_location="cpu" )["model"]
    new_state_dict = convert_state_dict(state_dict , config )
    missing_keys , unexpected_keys = model.load_state_dict(new_state_dict , strict=False )
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys ) == 0)
    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" )
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"] , images=image , padding=True , return_tensors="pt" )
    with torch.no_grad():
        outputs = model(**inputs )
    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.35_23, 6.36_29]] )
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.18_73, 8.62_30]] )
    else:
        raise ValueError(F'''Model name {model_name} not supported.''' )
    assert torch.allclose(outputs.logits_per_image , expected_logits , atol=1E-3 )
    processor.save_pretrained(pytorch_dump_folder_path )
    model.save_pretrained(pytorch_dump_folder_path )
    print("Successfully saved processor and model to" , pytorch_dump_folder_path )
    if push_to_hub:
        print("Pushing to the hub..." )
        processor.push_to_hub(model_name , organization="nielsr" )
        model.push_to_hub(model_name , organization="nielsr" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
)
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
parser.add_argument(
"--model_name",
        default="groupvit-gcc-yfcc",
type=str,
help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
)
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 134 | 1 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset( Dataset ):
    '''simple docstring'''
    def __init__( self : Dict, dataset : Union[str, Any], process : Dict, params : str )-> Dict:
        self.dataset = dataset
        self.process = process
        self.params = params
    def __len__( self : Any )-> Optional[Any]:
        return len(self.dataset )
    def __getitem__( self : str, i : List[str] )-> List[Any]:
        item = self.dataset[i]
        processed = self.process(item, **self.params )
        return processed
class PipelineIterator( IterableDataset ):
    '''simple docstring'''
    def __init__( self : List[Any], loader : Dict, infer : str, params : Optional[Any], loader_batch_size : Any=None )-> int:
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size
        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None
def __len__( self : Optional[int] )-> Tuple:
return len(self.loader )
def __iter__( self : Any )-> Optional[Any]:
        self.iterator = iter(self.loader )
return self
    def loader_batch_item( self : List[str] )-> Optional[Any]:
        if isinstance(self._loader_batch_data, torch.Tensor ):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput ):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor ):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
                    elif isinstance(element[0], np.ndarray ):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0 ) for el in element )
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple ):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor ):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
                    elif isinstance(element[0], np.ndarray ):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0 ) for el in element )
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor ):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0 )
                elif isinstance(element[self._loader_batch_index], np.ndarray ):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0 )
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched )
        self._loader_batch_index += 1
        return result
    def __next__( self : str )-> int:
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()
        # We're out of items within a batch
        item = next(self.iterator )
        processed = self.infer(item, **self.params )
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor ):
                first_tensor = processed
            else:
                key = list(processed.keys() )[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list ):
                observed_batch_size = len(first_tensor )
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class PipelineChunkIterator( PipelineIterator ):
    '''simple docstring'''
    def __init__( self : Optional[int], loader : Tuple, infer : Union[str, Any], params : str, loader_batch_size : Optional[int]=None )-> List[str]:
        super().__init__(loader, infer, params )
    def __iter__( self : int )-> int:
        self.iterator = iter(self.loader )
        self.subiterator = None
        return self
    def __next__( self : List[Any] )-> List[str]:
        if self.subiterator is None:
            # A None subiterator means we haven't started a `preprocess` iterator yet, so start it
            self.subiterator = self.infer(next(self.iterator ), **self.params )
        try:
            # Try to return next item
            processed = next(self.subiterator )
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator ), **self.params )
            processed = next(self.subiterator )
        return processed
class PipelinePackIterator( PipelineIterator ):
    '''simple docstring'''
    def __iter__( self : int )-> Union[str, Any]:
        self.iterator = iter(self.loader )
        return self
    def __next__( self : List[Any] )-> List[str]:
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # hits an `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop('''is_last''' )
                accumulator.append(item )
                if is_last:
                    return accumulator
        while not is_last:
            processed = self.infer(next(self.iterator ), **self.params )
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor ):
                    first_tensor = processed
                else:
                    key = list(processed.keys() )[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list ):
                    observed_batch_size = len(first_tensor )
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop('''is_last''' )
                    accumulator.append(item )
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop('''is_last''' )
                accumulator.append(item )
        return accumulator
class KeyDataset( Dataset ):
    '''simple docstring'''
    def __init__( self : Optional[Any], dataset : Dataset, key : str )-> Union[str, Any]:
        self.dataset = dataset
        self.key = key
    def __len__( self : int )-> Optional[Any]:
        return len(self.dataset )
    def __getitem__( self : Tuple, i : List[str] )-> List[str]:
        return self.dataset[i][self.key]
class KeyPairDataset( Dataset ):
    '''simple docstring'''
    def __init__( self : List[str], dataset : Dataset, key_a : str, key_b : str )-> str:
        self.dataset = dataset
        self.key_a = key_a
        self.key_b = key_b
    def __len__( self : Optional[int] )-> List[Any]:
        return len(self.dataset )
    def __getitem__( self : Union[str, Any], i : Tuple )-> Union[str, Any]:
        return {"text": self.dataset[i][self.key_a], "text_pair": self.dataset[i][self.key_b]}
| 625 |
"""simple docstring"""
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config( model_name : Optional[Any] ):
    """simple docstring"""
    config = VideoMAEConfig()
    set_architecture_configs(model_name , config )
    if "finetuned" not in model_name:
        config.use_mean_pooling = False
    if "finetuned" in model_name:
        repo_id = '''huggingface/label-files'''
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = '''kinetics400-id2label.json'''
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = '''something-something-v2-id2label.json'''
        else:
            raise ValueError('''Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.''' )
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    return config
def set_architecture_configs( model_name : List[str] , config : Optional[Any] ):
    """simple docstring"""
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('''Model name should include either "small", "base", "large", or "huge"''' )
def rename_key( name : Any ):
    """simple docstring"""
    if "encoder." in name:
        name = name.replace('''encoder.''' , '''''' )
    if "cls_token" in name:
        name = name.replace('''cls_token''' , '''videomae.embeddings.cls_token''' )
    if "decoder_pos_embed" in name:
        name = name.replace('''decoder_pos_embed''' , '''decoder.decoder_pos_embed''' )
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace('''pos_embed''' , '''videomae.embeddings.position_embeddings''' )
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''' , '''videomae.embeddings.patch_embeddings.projection''' )
    if "patch_embed.norm" in name:
        name = name.replace('''patch_embed.norm''' , '''videomae.embeddings.norm''' )
    if "decoder.blocks" in name:
        name = name.replace('''decoder.blocks''' , '''decoder.decoder_layers''' )
    if "blocks" in name:
        name = name.replace('''blocks''' , '''videomae.encoder.layer''' )
    if "attn.proj" in name:
        name = name.replace('''attn.proj''' , '''attention.output.dense''' )
    if "attn" in name and "bias" not in name:
        name = name.replace('''attn''' , '''attention.self''' )
    if "attn" in name:
        name = name.replace('''attn''' , '''attention.attention''' )
    if "norm1" in name:
        name = name.replace('''norm1''' , '''layernorm_before''' )
    if "norm2" in name:
        name = name.replace('''norm2''' , '''layernorm_after''' )
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''' , '''output.dense''' )
    if "decoder_embed" in name:
        name = name.replace('''decoder_embed''' , '''decoder.decoder_embed''' )
    if "decoder_norm" in name:
        name = name.replace('''decoder_norm''' , '''decoder.decoder_norm''' )
    if "decoder_pred" in name:
        name = name.replace('''decoder_pred''' , '''decoder.decoder_pred''' )
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace('''norm.weight''' , '''videomae.layernorm.weight''' )
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace('''norm.bias''' , '''videomae.layernorm.bias''' )
    if "head" in name and "decoder" not in name:
        name = name.replace('''head''' , '''classifier''' )
    return name
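# Example of the renaming chain above (illustrative): the checkpoint key
# "encoder.blocks.0.attn.proj.weight" first has its "encoder." prefix stripped,
# then "blocks" -> "videomae.encoder.layer" and "attn.proj" ->
# "attention.output.dense", yielding
# "videomae.encoder.layer.0.attention.output.dense.weight".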
def convert_state_dict( orig_state_dict : int , config : int ):
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if key.startswith('''encoder.''' ):
            key = key.replace('''encoder.''' , '''''' )
        if "qkv" in key:
            # fused qkv projections need to be split into separate query/key/value matrices
            key_split = key.split('''.''' )
            if key.startswith('''decoder.blocks''' ):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2] )
                prefix = '''decoder.decoder_layers.'''
                if "weight" in key:
                    orig_state_dict[F'''{prefix}{layer_num}.attention.attention.query.weight'''] = val[:dim, :]
                    orig_state_dict[F'''{prefix}{layer_num}.attention.attention.key.weight'''] = val[dim : dim * 2, :]
                    orig_state_dict[F'''{prefix}{layer_num}.attention.attention.value.weight'''] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1] )
                prefix = '''videomae.encoder.layer.'''
                if "weight" in key:
                    orig_state_dict[F'''{prefix}{layer_num}.attention.attention.query.weight'''] = val[:dim, :]
                    orig_state_dict[F'''{prefix}{layer_num}.attention.attention.key.weight'''] = val[dim : dim * 2, :]
                    orig_state_dict[F'''{prefix}{layer_num}.attention.attention.value.weight'''] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def prepare_video( ):
    """simple docstring"""
    file = hf_hub_download(
        repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
    video = np.load(file )
    return list(video )
def convert_videomae_checkpoint( checkpoint_url : Optional[Any] , pytorch_dump_folder_path : Union[str, Any] , model_name : List[Any] , push_to_hub : Optional[int] ):
    """simple docstring"""
    config = get_videomae_config(model_name )
    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config )
    else:
        model = VideoMAEForPreTraining(config )
    # download original checkpoint, hosted on Google Drive
    output = '''pytorch_model.bin'''
    gdown.cached_download(checkpoint_url , output , quiet=False )
    files = torch.load(output , map_location='''cpu''' )
    if "model" in files:
        state_dict = files['''model''']
    else:
        state_dict = files['''module''']
    new_state_dict = convert_state_dict(state_dict , config )
    model.load_state_dict(new_state_dict )
    model.eval()
    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
    video = prepare_video()
    inputs = image_processor(video , return_tensors='''pt''' )
    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' )
        inputs['''bool_masked_pos'''] = torch.load(local_path )
    outputs = model(**inputs )
    logits = outputs.logits
    model_names = [
'''videomae-small-finetuned-kinetics''',
'''videomae-small-finetuned-ssv2''',
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
'''videomae-base-short''',
'''videomae-base-short-finetuned-kinetics''',
'''videomae-base''',
'''videomae-base-finetuned-kinetics''',
'''videomae-large''',
'''videomae-large-finetuned-kinetics''',
'''videomae-huge-finetuned-kinetics''',
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
'''videomae-base-short-ssv2''',
'''videomae-base-short-finetuned-ssv2''',
'''videomae-base-ssv2''',
'''videomae-base-finetuned-ssv2''',
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
lowerCamelCase__ : Union[str, Any] =torch.Size([1, 400] )
lowerCamelCase__ : str =torch.tensor([-0.92_91, -0.40_61, -0.93_07] )
elif model_name == "videomae-small-finetuned-ssv2":
lowerCamelCase__ : int =torch.Size([1, 174] )
lowerCamelCase__ : Dict =torch.tensor([0.26_71, -0.46_89, -0.82_35] )
elif model_name == "videomae-base":
lowerCamelCase__ : List[str] =torch.Size([1, 1408, 1536] )
lowerCamelCase__ : Dict =torch.tensor([[0.77_39, 0.79_68, 0.70_89], [0.67_01, 0.74_87, 0.62_09], [0.42_87, 0.51_58, 0.47_73]] )
elif model_name == "videomae-base-short":
lowerCamelCase__ : List[Any] =torch.Size([1, 1408, 1536] )
lowerCamelCase__ : List[str] =torch.tensor([[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] )
# we verified the loss both for normalized and unnormalized targets for this one
lowerCamelCase__ : str =torch.tensor([0.51_42] ) if config.norm_pix_loss else torch.tensor([0.64_69] )
elif model_name == "videomae-large":
lowerCamelCase__ : Union[str, Any] =torch.Size([1, 1408, 1536] )
lowerCamelCase__ : List[Any] =torch.tensor([[0.71_49, 0.79_97, 0.69_66], [0.67_68, 0.78_69, 0.69_48], [0.51_39, 0.62_21, 0.56_05]] )
elif model_name == "videomae-large-finetuned-kinetics":
lowerCamelCase__ : Any =torch.Size([1, 400] )
lowerCamelCase__ : str =torch.tensor([0.07_71, 0.00_11, -0.36_25] )
elif model_name == "videomae-huge-finetuned-kinetics":
lowerCamelCase__ : Any =torch.Size([1, 400] )
lowerCamelCase__ : Optional[int] =torch.tensor([0.24_33, 0.16_32, -0.48_94] )
elif model_name == "videomae-base-short-finetuned-kinetics":
lowerCamelCase__ : List[str] =torch.Size([1, 400] )
lowerCamelCase__ : Dict =torch.tensor([0.65_88, 0.09_90, -0.24_93] )
elif model_name == "videomae-base-finetuned-kinetics":
lowerCamelCase__ : str =torch.Size([1, 400] )
lowerCamelCase__ : Any =torch.tensor([0.36_69, -0.06_88, -0.24_21] )
elif model_name == "videomae-base-short-ssv2":
lowerCamelCase__ : Tuple =torch.Size([1, 1408, 1536] )
lowerCamelCase__ : Dict =torch.tensor([[0.47_12, 0.52_96, 0.57_86], [0.22_78, 0.27_29, 0.40_26], [0.03_52, 0.07_30, 0.25_06]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
lowerCamelCase__ : Optional[int] =torch.Size([1, 174] )
lowerCamelCase__ : Any =torch.tensor([-0.05_37, -0.15_39, -0.32_66] )
elif model_name == "videomae-base-ssv2":
lowerCamelCase__ : Dict =torch.Size([1, 1408, 1536] )
lowerCamelCase__ : str =torch.tensor([[0.81_31, 0.87_27, 0.85_46], [0.73_66, 0.93_77, 0.88_70], [0.59_35, 0.88_74, 0.85_64]] )
elif model_name == "videomae-base-finetuned-ssv2":
lowerCamelCase__ : str =torch.Size([1, 174] )
lowerCamelCase__ : int =torch.tensor([0.19_61, -0.83_37, -0.63_89] )
else:
raise ValueError(f'''Model name not supported. Should be one of {model_names}''' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3] , expected_slice , atol=1e-4 )
    else:
        print('''Logits:''' , logits[0, :3, :3] )
        assert torch.allclose(logits[0, :3, :3] , expected_slice , atol=1e-4 )
    print('''Logits ok!''' )
    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss , expected_loss , atol=1e-4 )
print('''Loss ok!''' )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' )
        image_processor.save_pretrained(pytorch_dump_folder_path )
        model.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print('''Pushing to the hub...''' )
        model.push_to_hub(model_name , organization='''nielsr''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4",
type=str,
help=(
"URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"
" download link."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="/Users/nielsrogge/Documents/VideoMAE/Test",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.")
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 625 | 1 |
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time( t ) -> str:
    "Format `t` (in seconds) to (h):mm:ss"
    t = int(t )
    h , m , s = t // 3_600, (t // 60) % 60, t % 60
    return F"""{h}:{m:02d}:{s:02d}""" if h != 0 else F"""{m:02d}:{s:02d}"""
def html_progress_bar( value , total , prefix , label , width=300 ) -> Union[str, Any]:
return F"""
<div>
{prefix}
<progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
{label}
</div>
"""
def text_to_html_table( items ) -> int:
    "Put the texts in `items` in an HTML table."
    html_code = '<table border="1" class="dataframe">\n'
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += F""" <th>{i}</th>\n"""
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
_lowercase : List[Any] = F"""{elt:.6f}""" if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else str(_SCREAMING_SNAKE_CASE )
html_code += F""" <td>{elt}</td>\n"""
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
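# For example, text_to_html_table([["Step", "Loss"], [10, 0.123456]]) renders a
# two-column HTML table whose float cell is formatted as "0.123456".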
class NotebookProgressBar:
    warmup = 5
    update_every = 0.2
    def __init__( self , total , prefix = None , leave = True , parent = None , width = 3_0_0 , ):
        self.total = total
        self.prefix = '' if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None
    def update( self , value , force_update = False , comment = None ):
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value )
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value )
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item ) , 1 )
    def update_bar( self , value , comment = None ):
        spaced_value = ' ' * (len(str(self.total ) ) - len(str(value ) )) + str(value )
        if self.elapsed_time is None:
            self.label = F"""[{spaced_value}/{self.total} : < :"""
        elif self.predicted_remaining is None:
            self.label = F"""[{spaced_value}/{self.total} {format_time(self.elapsed_time )}"""
        else:
            self.label = (
                F"""[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <"""
                F""" {format_time(self.predicted_remaining )}"""
            )
            self.label += F""", {1/self.average_time_per_item:.2f} it/s"""
        self.label += "]" if self.comment is None or len(self.comment ) == 0 else F""", {self.comment}]"""
        self.display()
    def display( self ):
        self.html_code = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code ) , display_id=True )
        else:
            self.output.update(disp.HTML(self.html_code ) )
    def close( self ):
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML('' ) )
class NotebookTrainingTracker( NotebookProgressBar ):
    def __init__( self , num_steps , column_names = None ):
        super().__init__(num_steps )
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None
    def display( self ):
        self.html_code = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table )
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code ) , display_id=True )
        else:
            self.output.update(disp.HTML(self.html_code ) )
    def write_line( self , values ):
        if self.inner_table is None:
            self.inner_table = [list(values.keys() ), list(values.values() )]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table ) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key )
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns] )
    def add_child( self , total , prefix = None , width = 3_0_0 ):
        self.child_bar = NotebookProgressBar(total , prefix = prefix , parent = self , width = width )
        return self.child_bar
    def remove_child( self ):
        self.child_bar = None
        self.display()
class NotebookProgressCallback( TrainerCallback ):
    def __init__( self ):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False
    def on_train_begin( self , args , state , control , **kwargs ):
        self.first_column = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step'''
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ['''Training Loss''']
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append('''Validation Loss''' )
        self.training_tracker = NotebookTrainingTracker(state.max_steps , column_names )
    def on_step_end( self , args , state , control , **kwargs ):
        epoch = int(state.epoch ) if int(state.epoch ) == state.epoch else F"""{state.epoch:.2f}"""
        self.training_tracker.update(
            state.global_step + 1 , comment=F"""Epoch {epoch}/{state.num_train_epochs}""" , force_update=self._force_next_update , )
        self._force_next_update = False
    def on_prediction_step( self , args , state , control , eval_dataloader=None , **kwargs ):
        if not has_length(eval_dataloader ):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader ) )
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader ) )
            self.prediction_bar.update(1 )
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1 )
    def on_predict( self , args , state , control , **kwargs ):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None
    def on_log( self , args , state , control , logs=None , **kwargs ):
        # Only for when there is no evaluation
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {'''Training Loss''': logs['''loss''']}
            # First column is necessarily Step since we're not in epoch eval strategy
            values['''Step'''] = state.global_step
            self.training_tracker.write_line(values )
    def on_evaluate( self , args , state , control , metrics=None , **kwargs ):
        if self.training_tracker is not None:
            values = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''}
            for log in reversed(state.log_history ):
                if "loss" in log:
                    values['''Training Loss'''] = log['''loss''']
                    break
            if self.first_column == "Epoch":
                values['''Epoch'''] = int(state.epoch )
            else:
                values['''Step'''] = state.global_step
            metric_key_prefix = '''eval'''
            for k in metrics:
                if k.endswith('''_loss''' ):
                    metric_key_prefix = re.sub(r'''\_loss$''' , '''''' , k )
            _ = metrics.pop('''total_flos''' , None )
            _ = metrics.pop('''epoch''' , None )
            _ = metrics.pop(F"""{metric_key_prefix}_runtime""" , None )
            _ = metrics.pop(F"""{metric_key_prefix}_samples_per_second""" , None )
            _ = metrics.pop(F"""{metric_key_prefix}_steps_per_second""" , None )
            _ = metrics.pop(F"""{metric_key_prefix}_jit_compilation_time""" , None )
            for k, v in metrics.items():
                if k == F"""{metric_key_prefix}_loss""":
                    values['''Validation Loss'''] = v
                else:
                    splits = k.split('''_''' )
                    name = ''' '''.join([part.capitalize() for part in splits[1:]] )
                    values[name] = v
            self.training_tracker.write_line(values )
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True
    def on_train_end( self , args , state , control , **kwargs ):
        self.training_tracker.update(
            state.global_step , comment=F"""Epoch {int(state.epoch )}/{state.num_train_epochs}""" , force_update=True )
        self.training_tracker = None
| 66 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class TFPTAutoModelTest( unittest.TestCase ):
'''simple docstring'''
    @slow
    def test_model_from_pretrained( self : List[str] ) -> List[Any]:
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModel.from_pretrained(model_name , from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertModel )
            model = AutoModel.from_pretrained(model_name , from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , BertModel )
    @slow
    def test_model_for_pretraining_from_pretrained( self : Tuple ) -> Optional[Any]:
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModelForPreTraining.from_pretrained(model_name , from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertForPreTraining )
            model = AutoModelForPreTraining.from_pretrained(model_name , from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , BertForPreTraining )
    @slow
    def test_model_for_causal_lm( self : int ) -> Optional[int]:
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , GPTaConfig )
            model = TFAutoModelForCausalLM.from_pretrained(model_name , from_pt=True )
            model , loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name , output_loading_info=True , from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFGPTaLMHeadModel )
            model = AutoModelForCausalLM.from_pretrained(model_name , from_tf=True )
            model , loading_info = AutoModelForCausalLM.from_pretrained(
                model_name , output_loading_info=True , from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , GPTaLMHeadModel )
    @slow
    def test_lmhead_model_from_pretrained( self : Union[str, Any] ) -> Optional[Any]:
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModelWithLMHead.from_pretrained(model_name , from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertForMaskedLM )
            model = AutoModelWithLMHead.from_pretrained(model_name , from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , BertForMaskedLM )
    @slow
    def test_model_for_masked_lm( self : Union[str, Any] ) -> Optional[int]:
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModelForMaskedLM.from_pretrained(model_name , from_pt=True )
            model , loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name , output_loading_info=True , from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertForMaskedLM )
            model = AutoModelForMaskedLM.from_pretrained(model_name , from_tf=True )
            model , loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name , output_loading_info=True , from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , BertForMaskedLM )
    @slow
    def test_model_for_encoder_decoder_lm( self : Tuple ) -> Dict:
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , TaConfig )
            model = TFAutoModelForSeqaSeqLM.from_pretrained(model_name , from_pt=True )
            model , loading_info = TFAutoModelForSeqaSeqLM.from_pretrained(
                model_name , output_loading_info=True , from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFTaForConditionalGeneration )
            model = AutoModelForSeqaSeqLM.from_pretrained(model_name , from_tf=True )
            model , loading_info = AutoModelForSeqaSeqLM.from_pretrained(
                model_name , output_loading_info=True , from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TaForConditionalGeneration )
    @slow
    def test_sequence_classification_model_from_pretrained( self : Optional[Any] ) -> str:
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModelForSequenceClassification.from_pretrained(model_name , from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertForSequenceClassification )
            model = AutoModelForSequenceClassification.from_pretrained(model_name , from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , BertForSequenceClassification )
    @slow
    def test_question_answering_model_from_pretrained( self : Union[str, Any] ) -> List[Any]:
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name , from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertForQuestionAnswering )
            model = AutoModelForQuestionAnswering.from_pretrained(model_name , from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , BertForQuestionAnswering )
    def test_from_pretrained_identifier( self : int ) -> List[str]:
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER , from_pt=True )
        self.assertIsInstance(model , TFBertForMaskedLM )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14_410 )
        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER , from_tf=True )
        self.assertIsInstance(model , BertForMaskedLM )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14_410 )
    def test_from_identifier_from_model_type( self : Optional[Any] ) -> Tuple:
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , from_pt=True )
        self.assertIsInstance(model , TFRobertaForMaskedLM )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14_410 )
        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , from_tf=True )
        self.assertIsInstance(model , RobertaForMaskedLM )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14_410 )
| 186 | 0 |
import string
def atbash_slow( sequence : str ) -> str:
    """Encode `sequence` with the Atbash cipher, one character at a time."""
    output = ''
    for i in sequence:
        extract = ord(i )
        if 65 <= extract <= 90:
            output += chr(155 - extract )
        elif 97 <= extract <= 122:
            output += chr(219 - extract )
        else:
            output += i
    return output
def atbash( sequence : str ) -> str:
    """Encode `sequence` with the Atbash cipher using a translation table."""
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c )] if c in letters else c for c in sequence )
def a ( ) -> None:
"""simple docstring"""
from timeit import timeit
print('Running performance benchmarks...' )
_lowercase ='from string import printable ; from __main__ import atbash, atbash_slow'
print(F'''> atbash_slow(): {timeit("atbash_slow(printable)" , setup=A__ )} seconds''' )
print(F'''> atbash(): {timeit("atbash(printable)" , setup=A__ )} seconds''' )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f"{example} encrypted in atbash: {atbash(example)}")
benchmark()
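# Worked example: atbash("Hello") == "Svool", and applying atbash twice returns
# the original text, since the cipher is its own inverse.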
| 380 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }
def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])
    return image, map


def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])
    return [image1, image2], [map1, map2]
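# Note: in the fixtures_ade20k test split the files alternate, so even indices are
# images and odd indices are the matching segmentation maps.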
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors and matching all-zero segmentation maps
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                2,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
    def test_reduce_labels(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        # with do_reduce_labels the background (0) is mapped to 255 and classes shift down by one
        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
| 380 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
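# The `_LazyModule` pattern below defers importing the heavy torch-backed modules
# until one of their attributes is actually accessed, keeping `import transformers` fast.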
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 159 | """simple docstring"""
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Return the sum of array[start..end] in O(1) using the prefix sums."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """Return True if some contiguous subarray sums to target_sum."""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
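# Worked example: for array [1, 2, 3] the prefix sums are [1, 3, 6];
# get_sum(1, 2) = 6 - 1 = 5, and contains_sum(5) is True because 6 - 5 = 1
# already occurs among the prefix sums.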
if __name__ == "__main__":
import doctest
doctest.testmod()
| 159 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_bridgetower': [
'BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BridgeTowerConfig',
'BridgeTowerTextConfig',
'BridgeTowerVisionConfig',
],
'processing_bridgetower': ['BridgeTowerProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bridgetower"] = [
'BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST',
'BridgeTowerForContrastiveLearning',
'BridgeTowerForImageAndTextRetrieval',
'BridgeTowerForMaskedLM',
'BridgeTowerModel',
'BridgeTowerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 703 | import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
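# Example invocation (hypothetical paths):
#   python convert_t5_checkpoint.py --tf_checkpoint_path ./t5/model.ckpt \
#       --config_file ./t5/config.json --pytorch_dump_path ./t5-pytorch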
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 138 | 0 |
"""simple docstring"""
def lowercase (_snake_case ) -> list[int]:
'''simple docstring'''
if num <= 0:
raise ValueError("Input must be a positive integer" )
__UpperCamelCase = [True] * (num + 1)
__UpperCamelCase = 2
while p * p <= num:
if primes[p]:
for i in range(p * p ,num + 1 ,_snake_case ):
__UpperCamelCase = False
p += 1
return [prime for prime in range(2 ,num + 1 ) if primes[prime]]
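# Example: prime_sieve_eratosthenes(10) returns [2, 3, 5, 7].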
if __name__ == "__main__":
import doctest
doctest.testmod()
_A = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num)) | 505 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
    from .pipeline_shap_e_img2img import ShapEImg2ImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
) | 505 | 1 |
def optimal_merge_pattern(files: list) -> float:
    """Merge all files two at a time, always picking the two smallest,
    and return the total (optimal) merge cost."""
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost
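# Worked example: [2, 3, 4] merges 2+3 first (cost 5), then 5+4 (cost 9),
# for an optimal total merge cost of 14.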
if __name__ == "__main__":
import doctest
doctest.testmod()
| 709 |
import sys
import turtle
def get_mid(p1, p2):
    """Return the midpoint of two 2-D points."""
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(vertex1, vertex2, vertex3, depth):
    """Recursively draw the Sierpinski triangle with the given corner vertices."""
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
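# Each call outlines one triangle and then recurses into the three corner
# sub-triangles until depth reaches 0, producing the Sierpinski gasket.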
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"""Correct format for using this script: """
"""python fractals.py <int:depth_for_fractal>"""
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("""red""")
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 550 | 0 |
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """Return indices [i, j] with nums[i] + nums[j] == target, or [] if none exist."""
    i = 0
    j = len(nums) - 1

    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1

    return []
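# Note: the two-pointer scan assumes nums is sorted in ascending order; moving the
# left pointer grows the sum while moving the right pointer shrinks it.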
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{two_pointer([2, 7, 11, 15], 9) = }""")
| 600 |
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    """Return the sum of an arithmetic progression."""
    # formula for sum of series: n/2 * (2a + (n - 1)d)
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    return total
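# Example: sum_of_series(1, 1, 10) = 10/2 * (2*1 + 9*1) = 55.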
def main():
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 600 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    """Output class for the scheduler's `step` and `step_correct` methods."""

    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    """Stochastic sampling scheduler from Karras et al. (2022)."""

    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)
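    # The "churn" step below follows Algorithm 2 of Karras et al. (2022): sigma is
    # temporarily increased by gamma and matching Gaussian noise is added to the sample.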
    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ) -> Tuple[torch.FloatTensor, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat
    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )
    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
| 122 |
"""simple docstring"""
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")

        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)

        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)

        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
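# Example invocation (hypothetical paths):
#   python convert_transfo_xl_checkpoint.py --pytorch_dump_folder_path ./out \
#       --tf_checkpoint_path ./transfo-xl/model.ckpt --transfo_xl_config_file ./config.json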
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 122 | 1 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)
    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
| 309 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            tie_weights_=True,
        )

        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 497 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    r"""Constructs a CLIP image processor."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
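# Typical usage (sketch): `CLIPImageProcessor()(images=pil_image, return_tensors="pt")`
# returns a BatchFeature whose "pixel_values" tensor is ready for the CLIP vision model.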
| 260 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
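# betas_for_alpha_bar chooses each beta so that the cumulative product of
# (1 - beta_t) tracks the chosen alpha_bar function over the diffusion process.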
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    """Scheduler inspired by the DPM-Solver-2 / k-diffusion sampler."""

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()
    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(
        self,
        sample: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
    ) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]

        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)

        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)

        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()

        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]
        )

        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)

        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()

        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])

        self.sample = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()

        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]

        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t
    @property
    def state_in_first_order(self):
        return self.sample is None
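    # `step` alternates between a first-order update toward the interpolated sigma and
    # a second-order (DPM-Solver-2 style) correction that completes the full step.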
    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat

            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol

            # 3. delta timestep
            dt = sigma_next - sigma_hat

            sample = self.sample
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
def __magic_name__( self :List[Any] , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :torch.FloatTensor , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__SCREAMING_SNAKE_CASE : Tuple = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(lowerCAmelCase__ ):
# mps does not support float64
__SCREAMING_SNAKE_CASE : Tuple = self.timesteps.to(original_samples.device , dtype=torch.floataa )
__SCREAMING_SNAKE_CASE : Union[str, Any] = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.timesteps.to(original_samples.device )
__SCREAMING_SNAKE_CASE : List[str] = timesteps.to(original_samples.device )
__SCREAMING_SNAKE_CASE : Dict = [self.index_for_timestep(lowerCAmelCase__ , lowerCAmelCase__ ) for t in timesteps]
__SCREAMING_SNAKE_CASE : Optional[Any] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
__SCREAMING_SNAKE_CASE : List[Any] = sigma.unsqueeze(-1 )
__SCREAMING_SNAKE_CASE : Optional[Any] = original_samples + noise * sigma
return noisy_samples
def __len__( self :Tuple ) -> Optional[Any]:
return self.config.num_train_timesteps
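# The two-phase `step` above is the KDPM2 / DPM-Solver-2 style sampler from diffusers;
# a minimal sampling-loop sketch, assuming that scheduler class and a toy UNet2DModel
# (both names are assumptions, since this fragment omits its file header):
import torch
from diffusers import KDPM2DiscreteScheduler, UNet2DModel

scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1000)
unet = UNet2DModel(sample_size=32, in_channels=3, out_channels=3)  # illustrative shapes
scheduler.set_timesteps(20)
sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    with torch.no_grad():
        noise_pred = unet(model_input, t).sample
    sample = scheduler.step(noise_pred, t, sample).prev_sample  # alternates 1st/2nd order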
| 260 | 1 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.array:
    # number of integration steps between x0 and x_end
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        # predictor: plain forward-Euler step
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        # corrector: average the slopes at both ends of the interval (Heun's method)
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y[k + 1]))
        )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
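# Quick numerical check of the integrator on dy/dx = y with y(0) = 1 (exact: e^x);
# Heun's predictor-corrector is second-order accurate in step_size:
#
#   euler_modified(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)[-1]  # ≈ 2.718, close to e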
| 301 |
'''simple docstring'''
import argparse
import copy
def generate_neighbours(path):
    # Build the neighbour dictionary from a whitespace-separated edge list.
    dict_of_neighbours = {}
    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )
    return dict_of_neighbours


def generate_first_solution(path, dict_of_neighbours):
    # Greedy nearest-neighbour tour used as the starting point for the search.
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution


def find_neighborhood(solution, dict_of_neighbours):
    # All 2-swap neighbours of `solution`; each entry carries its tour length last.
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution


def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost


def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )
    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )
    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
'-f',
'--File',
type=str,
help='Path to the file containing the data',
required=True,
)
parser.add_argument(
'-i',
'--Iterations',
type=int,
help='How many iterations the algorithm should perform',
required=True,
)
parser.add_argument(
'-s', '--Size', type=int, help='Size of the tabu list', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
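# The input file (passed via -f) is expected to be a whitespace-separated edge list,
# one "node node distance" triple per line, e.g. (illustrative data and file names):
#
#   a b 20
#   a c 18
#   b c 10
#
# Run as: python tabu_search.py -f graph.txt -i 100 -s 5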
| 301 | 1 |
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup

headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
" (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}
def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    """Scrape Google Images for `query` and save up to `max_images` full-size files."""
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }
    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script")))
    )
    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)
    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0
    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )
    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode(
            "unicode-escape"
        )
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode(
            "unicode-escape"
        )
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg"
        )
    return index
if __name__ == "__main__":
try:
        image_count = download_images_from_google_query(sys.argv[1])
print(F'''{image_count} images were downloaded to disk.''')
except IndexError:
print("Please provide a search term.")
raise
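# Example invocation (network access and a writable working directory assumed;
# the script name is illustrative):
#
#   python download_images_from_google_query.py "cat pictures"
#
# saves up to five images under ./query_cat_pictures/; the function can also be
# called directly, e.g. download_images_from_google_query("cat pictures", 2).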
| 642 |
import math
import sys
def read_file_binary(file_path: str) -> str:
    """Read a binary file and return its contents as a string of '0'/'1' bits."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    """Decompress `data_bits` with the Lempel-Ziv growing-lexicon scheme."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        if math.log2(index).is_integer():
            # the lexicon doubled in size: prepend '0' to every existing key
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    """Pack the bit string back into bytes and write it to `file_path`."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    """Strip the length prefix that the matching compressor prepends."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def compress(source_path: str, destination_path: str) -> None:
    """Full pipeline: read bits, strip prefix, decompress, write the result out."""
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
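# Usage sketch (file names are placeholders): the input must come from the matching
# Lempel-Ziv compressor, whose length header `remove_prefix` strips before
# `decompress_data` replays the growing lexicon:
#
#   python lempel_ziv_decompress.py compressed.bin restored.txt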
| 642 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array < 0] = 0  # threshold the logits into a binary mask
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
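# Usage sketch (downloads the CIDAS/clipseg-rd64-refined checkpoint on first use;
# the image path is a placeholder):
#
#   from PIL import Image
#   tool = ImageSegmentationTool()
#   tool.setup()  # loads processor + model
#   mask = tool(image=Image.open("photo.jpg"), label="cat")
#   mask.save("cat_mask.png")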
| 495 |
'''simple docstring'''
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=2 , lowerCAmelCase_=8 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=99 , lowerCAmelCase_=16 , lowerCAmelCase_=5 , lowerCAmelCase_=2 , lowerCAmelCase_=36 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=5_12 , lowerCAmelCase_=16 , lowerCAmelCase_=2 , lowerCAmelCase_=0.02 , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=None , ):
"""simple docstring"""
_snake_case = parent
_snake_case = batch_size
_snake_case = seq_length
_snake_case = is_training
_snake_case = use_input_mask
_snake_case = use_token_type_ids
_snake_case = use_labels
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = type_vocab_size
_snake_case = type_sequence_label_size
_snake_case = initializer_range
_snake_case = num_labels
_snake_case = num_choices
_snake_case = scope
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case = None
if self.use_input_mask:
_snake_case = random_attention_mask([self.batch_size, self.seq_length] )
_snake_case = None
if self.use_token_type_ids:
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_snake_case = None
_snake_case = None
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_snake_case = ids_tensor([self.batch_size] , self.num_choices )
_snake_case = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase ( self ):
"""simple docstring"""
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.get_config()
_snake_case = 3_00
return config
def lowerCamelCase ( self ):
"""simple docstring"""
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = MraModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
_snake_case = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
_snake_case = model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ):
"""simple docstring"""
_snake_case = True
_snake_case = MraModel(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , )
_snake_case = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , )
_snake_case = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = MraForMaskedLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = MraForQuestionAnswering(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = self.num_labels
_snake_case = MraForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = self.num_labels
_snake_case = MraForTokenClassification(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = self.num_choices
_snake_case = MraForMultipleChoice(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_snake_case = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_snake_case = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_snake_case = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase ( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
__lowercase = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
__lowercase = False
__lowercase = False
__lowercase = False
__lowercase = False
__lowercase = ()
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = MraModelTester(self )
_snake_case = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 )
def lowerCamelCase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase_ )
@slow
def lowerCamelCase ( self ):
"""simple docstring"""
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = MraModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@unittest.skip(reason='MRA does not output attentions' )
def lowerCamelCase ( self ):
"""simple docstring"""
return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
@slow
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = MraModel.from_pretrained('uw-madison/mra-base-512-4' )
_snake_case = torch.arange(2_56 ).unsqueeze(0 )
with torch.no_grad():
_snake_case = model(lowerCAmelCase_ )[0]
_snake_case = torch.Size((1, 2_56, 7_68) )
self.assertEqual(output.shape , lowerCAmelCase_ )
_snake_case = torch.tensor(
[[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=1E-4 ) )
@slow
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = MraForMaskedLM.from_pretrained('uw-madison/mra-base-512-4' )
_snake_case = torch.arange(2_56 ).unsqueeze(0 )
with torch.no_grad():
_snake_case = model(lowerCAmelCase_ )[0]
_snake_case = 5_02_65
_snake_case = torch.Size((1, 2_56, vocab_size) )
self.assertEqual(output.shape , lowerCAmelCase_ )
_snake_case = torch.tensor(
[[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=1E-4 ) )
@slow
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = MraForMaskedLM.from_pretrained('uw-madison/mra-base-4096-8-d3' )
_snake_case = torch.arange(40_96 ).unsqueeze(0 )
with torch.no_grad():
_snake_case = model(lowerCAmelCase_ )[0]
_snake_case = 5_02_65
_snake_case = torch.Size((1, 40_96, vocab_size) )
self.assertEqual(output.shape , lowerCAmelCase_ )
_snake_case = torch.tensor(
[[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=1E-4 ) )
| 495 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_table_transformer': [
'TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TableTransformerConfig',
'TableTransformerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
'TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TableTransformerForObjectDetection',
'TableTransformerModel',
'TableTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
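# The lazy module keeps `import transformers` cheap; the torch-backed classes are
# only materialised when first accessed, e.g.:
#
#   from transformers import TableTransformerForObjectDetection  # triggers the real import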
| 662 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    # get the pet-breed label out of a file name like "Abyssinian_1.jpg"
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
'''simple docstring'''
if args.with_tracking:
a = Accelerator(
cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir )
else:
a = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
a = config["lr"]
a = int(config["num_epochs"] )
a = int(config["seed"] )
a = int(config["batch_size"] )
a = config["image_size"]
if not isinstance(A, (list, tuple) ):
a = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps, "isdigit" ):
if args.checkpointing_steps == "epoch":
a = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
a = int(args.checkpointing_steps )
else:
raise ValueError(
F"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" )
else:
a = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
a = os.path.split(A )[-1].split("." )[0]
accelerator.init_trackers(A, A )
# Grab all the image filenames
a = [os.path.join(args.data_dir, A ) for fname in os.listdir(args.data_dir ) if fname.endswith(".jpg" )]
# Build the label correspondences
a = [extract_label(A ) for fname in file_names]
a = list(set(A ) )
id_to_label.sort()
a = {lbl: i for i, lbl in enumerate(A )}
# Set the seed before splitting the data.
np.random.seed(A )
torch.manual_seed(A )
torch.cuda.manual_seed_all(A )
# Split our filenames between train and validation
a = np.random.permutation(len(A ) )
a = int(0.8 * len(A ) )
a = random_perm[:cut]
a = random_perm[cut:]
# For training we use a simple RandomResizedCrop
a = Compose([RandomResizedCrop(A, scale=(0.5, 1.0) ), ToTensor()] )
a = PetsDataset(
[file_names[i] for i in train_split], image_transform=A, label_to_id=A )
# For evaluation, we use a deterministic Resize
a = Compose([Resize(A ), ToTensor()] )
a = PetsDataset([file_names[i] for i in eval_split], image_transform=A, label_to_id=A )
# Instantiate dataloaders.
a = DataLoader(A, shuffle=A, batch_size=A, num_workers=4 )
a = DataLoader(A, shuffle=A, batch_size=A, num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
a = create_model("resnet50d", pretrained=A, num_classes=len(A ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
a = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
a = False
for param in model.get_classifier().parameters():
a = True
# We normalize the batches of images to be a bit faster.
a = torch.tensor(model.default_cfg["mean"] )[None, :, None, None].to(accelerator.device )
a = torch.tensor(model.default_cfg["std"] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
a = torch.optim.Adam(params=model.parameters(), lr=lr / 25 )
# Instantiate learning rate scheduler
a = OneCycleLR(optimizer=A, max_lr=A, epochs=A, steps_per_epoch=len(A ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
a , a , a , a , a = accelerator.prepare(
A, A, A, A, A )
# We need to keep track of how many total steps we have iterated over
a = 0
# We also need to keep track of the starting epoch so files are named properly
a = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F"""Resumed from checkpoint: {args.resume_from_checkpoint}""" )
accelerator.load_state(args.resume_from_checkpoint )
a = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
a = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
a = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
a = os.path.splitext(A )[0]
if "epoch" in training_difference:
a = int(training_difference.replace("epoch_", "" ) ) + 1
a = None
else:
a = int(training_difference.replace("step_", "" ) )
a = resume_step // len(A )
resume_step -= starting_epoch * len(A )
# Now we train the model
for epoch in range(A, A ):
model.train()
if args.with_tracking:
a = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
a = accelerator.skip_first_batches(A, A )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
a = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
a = {k: v.to(accelerator.device ) for k, v in batch.items()}
a = (batch["image"] - mean) / std
a = model(A )
a = torch.nn.functional.cross_entropy(A, batch["label"] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(A )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(A, A ):
a = F"""step_{overall_step}"""
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
a = os.path.join(args.output_dir, A )
accelerator.save_state(A )
model.eval()
a = 0
a = 0
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
a = {k: v.to(accelerator.device ) for k, v in batch.items()}
a = (batch["image"] - mean) / std
with torch.no_grad():
a = model(A )
a = outputs.argmax(dim=-1 )
a , a = accelerator.gather_for_metrics((predictions, batch["label"]) )
a = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
a = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}: {100 * eval_metric:.2f}""" )
if args.with_tracking:
accelerator.log(
{
"accuracy": 100 * eval_metric,
"train_loss": total_loss.item() / len(A ),
"epoch": epoch,
}, step=A, )
if checkpointing_steps == "epoch":
a = F"""epoch_{epoch}"""
if args.output_dir is not None:
a = os.path.join(args.output_dir, A )
accelerator.save_state(A )
if args.with_tracking:
accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
parser.add_argument("--data_dir", required=A, help="The data folder on disk." )
parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training." )
parser.add_argument(
"--mixed_precision", type=A, default=A, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU.", )
parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU." )
parser.add_argument(
"--checkpointing_steps", type=A, default=A, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", )
parser.add_argument(
"--output_dir", type=A, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", )
parser.add_argument(
"--resume_from_checkpoint", type=A, default=A, help="If the training should continue from a checkpoint folder.", )
parser.add_argument(
"--with_tracking", action="store_true", help="Whether to load in all available experiment trackers from the environment and use them for logging.", )
parser.add_argument(
"--project_dir", type=A, default="logs", help="Location on where to store experiment tracking logs` and relevent project information", )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)
if __name__ == "__main__":
main()
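# Typical launch (script name and paths are illustrative; --data_dir must contain
# the `<label>_<index>.jpg` files that `extract_label` expects):
#
#   accelerate launch cv_example.py --data_dir images --with_tracking --checkpointing_steps epoch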
| 662 | 1 |
def find_min(arr):
    """Return the minimum possible difference between the sums of a two-way partition."""
    n = len(arr)
    s = sum(arr)
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]
    for i in range(1, n + 1):
        dp[i][0] = True
    for i in range(1, s + 1):
        dp[0][i] = False
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            # subset-sum recurrence: reuse the result without item i, then try taking it
            dp[i][j] = dp[i - 1][j]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
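# Worked example: [1, 6, 11, 5] sums to 23 and splits best as {1, 5, 6} vs {11},
# i.e. 12 vs 11, so the minimum difference is 1:
if __name__ == "__main__":
    print(find_min([1, 6, 11, 5]))  # 1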
| 10 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all the examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version() -> packaging.version.Version:
    """Read the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
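# Minimal illustration of what the "init" pattern does (version strings made up):
#
#   pattern, template = REPLACE_PATTERNS["init"]
#   pattern.sub(template.replace("VERSION", "4.9.0"), '__version__ = "4.9.0.dev0"\n')
#   # -> '__version__ = "4.9.0"\n'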
| 4 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
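# Usage sketch (checkpoint downloads assumed): any vision encoder can be paired
# with any text decoder through the composite class exported above, e.g.:
#
#   from transformers import VisionEncoderDecoderModel
#   model = VisionEncoderDecoderModel.from_encoder_decoder_pretrained(
#       "google/vit-base-patch16-224-in21k", "bert-base-uncased"
#   )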
| 225 | import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate the entropy of a pre-softmax logit tensor."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
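# Sanity check for `entropy`: uniform logits over n classes give log(n), e.g.
#   entropy(torch.zeros(1, 4))  # tensor([1.3863]) == log(4)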
class DeeBertEncoder(nn.Module):
def __init__( self : str , lowercase__ : List[str] ):
super().__init__()
_lowerCAmelCase = config.output_attentions
_lowerCAmelCase = config.output_hidden_states
_lowerCAmelCase = nn.ModuleList([BertLayer(lowercase__ ) for _ in range(config.num_hidden_layers )] )
_lowerCAmelCase = nn.ModuleList([BertHighway(lowercase__ ) for _ in range(config.num_hidden_layers )] )
_lowerCAmelCase = [-1 for _ in range(config.num_hidden_layers )]
def SCREAMING_SNAKE_CASE__ ( self : int , lowercase__ : Any ):
if (type(lowercase__ ) is float) or (type(lowercase__ ) is int):
for i in range(len(self.early_exit_entropy ) ):
_lowerCAmelCase = x
else:
_lowerCAmelCase = x
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , lowercase__ : str ):
_lowerCAmelCase = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , lowercase__ : Any , lowercase__ : Optional[Any]=None , lowercase__ : List[str]=None , lowercase__ : str=None , lowercase__ : Optional[Any]=None , ):
_lowerCAmelCase = ()
_lowerCAmelCase = ()
_lowerCAmelCase = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
_lowerCAmelCase = all_hidden_states + (hidden_states,)
_lowerCAmelCase = layer_module(
lowercase__ , lowercase__ , head_mask[i] , lowercase__ , lowercase__ )
_lowerCAmelCase = layer_outputs[0]
if self.output_attentions:
_lowerCAmelCase = all_attentions + (layer_outputs[1],)
_lowerCAmelCase = (hidden_states,)
if self.output_hidden_states:
_lowerCAmelCase = current_outputs + (all_hidden_states,)
if self.output_attentions:
_lowerCAmelCase = current_outputs + (all_attentions,)
_lowerCAmelCase = self.highway[i](lowercase__ )
# logits, pooled_output
if not self.training:
_lowerCAmelCase = highway_exit[0]
_lowerCAmelCase = entropy(lowercase__ )
_lowerCAmelCase = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
_lowerCAmelCase = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
_lowerCAmelCase = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(lowercase__ , i + 1 )
else:
_lowerCAmelCase = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
_lowerCAmelCase = all_hidden_states + (hidden_states,)
_lowerCAmelCase = (hidden_states,)
if self.output_hidden_states:
_lowerCAmelCase = outputs + (all_hidden_states,)
if self.output_attentions:
_lowerCAmelCase = outputs + (all_attentions,)
_lowerCAmelCase = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
def __init__( self : Optional[int] , lowercase__ : List[Any] ):
super().__init__(lowercase__ )
_lowerCAmelCase = config
_lowerCAmelCase = BertEmbeddings(lowercase__ )
_lowerCAmelCase = DeeBertEncoder(lowercase__ )
_lowerCAmelCase = BertPooler(lowercase__ )
self.init_weights()
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
self.encoder.init_highway_pooler(self.pooler )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
return self.embeddings.word_embeddings
def SCREAMING_SNAKE_CASE__ ( self : int , lowercase__ : List[Any] ):
_lowerCAmelCase = value
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , lowercase__ : List[str] ):
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(lowercase__ )
@add_start_docstrings_to_model_forward(lowercase__ )
def SCREAMING_SNAKE_CASE__ ( self : int , lowercase__ : int=None , lowercase__ : Union[str, Any]=None , lowercase__ : str=None , lowercase__ : Any=None , lowercase__ : int=None , lowercase__ : Optional[int]=None , lowercase__ : Any=None , lowercase__ : int=None , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
_lowerCAmelCase = input_ids.size()
elif inputs_embeds is not None:
_lowerCAmelCase = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
_lowerCAmelCase = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_lowerCAmelCase = torch.ones(lowercase__ , device=lowercase__ )
if encoder_attention_mask is None:
_lowerCAmelCase = torch.ones(lowercase__ , device=lowercase__ )
if token_type_ids is None:
_lowerCAmelCase = torch.zeros(lowercase__ , dtype=torch.long , device=lowercase__ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_lowerCAmelCase = self.get_extended_attention_mask(lowercase__ , lowercase__ , lowercase__ )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
_lowerCAmelCase = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
_lowerCAmelCase = encoder_attention_mask[:, None, None, :]
_lowerCAmelCase = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
_lowerCAmelCase = (1.0 - encoder_extended_attention_mask) * -1_0_0_0_0.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_lowerCAmelCase = self.get_head_mask(lowercase__ , self.config.num_hidden_layers )
_lowerCAmelCase = self.embeddings(
input_ids=lowercase__ , position_ids=lowercase__ , token_type_ids=lowercase__ , inputs_embeds=lowercase__ )
_lowerCAmelCase = self.encoder(
lowercase__ , attention_mask=lowercase__ , head_mask=lowercase__ , encoder_hidden_states=lowercase__ , encoder_attention_mask=lowercase__ , )
_lowerCAmelCase = encoder_outputs[0]
_lowerCAmelCase = self.pooler(lowercase__ )
_lowerCAmelCase = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
def __init__( self : List[Any] , lowercase__ : int , lowercase__ : Dict ):
_lowerCAmelCase = message
_lowerCAmelCase = exit_layer # start from 1!
class BertHighway(nn.Module):
def __init__( self : int , lowercase__ : Optional[Any] ):
super().__init__()
_lowerCAmelCase = BertPooler(lowercase__ )
_lowerCAmelCase = nn.Dropout(config.hidden_dropout_prob )
_lowerCAmelCase = nn.Linear(config.hidden_size , config.num_labels )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , lowercase__ : Dict ):
# Pooler
_lowerCAmelCase = encoder_outputs[0]
_lowerCAmelCase = self.pooler(lowercase__ )
# "return" pooler_output
# BertModel
_lowerCAmelCase = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
_lowerCAmelCase = bmodel_output[1]
_lowerCAmelCase = self.dropout(lowercase__ )
_lowerCAmelCase = self.classifier(lowercase__ )
return logits, pooled_output
@add_start_docstrings(
    "Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. ",
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
def __init__( self : Union[str, Any] , lowercase__ : Any ):
super().__init__(lowercase__ )
_lowerCAmelCase = config.num_labels
_lowerCAmelCase = config.num_hidden_layers
_lowerCAmelCase = DeeBertModel(lowercase__ )
_lowerCAmelCase = nn.Dropout(config.hidden_dropout_prob )
_lowerCAmelCase = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(lowercase__ )
def SCREAMING_SNAKE_CASE__ ( self : Dict , lowercase__ : Dict=None , lowercase__ : int=None , lowercase__ : Union[str, Any]=None , lowercase__ : Optional[Any]=None , lowercase__ : List[Any]=None , lowercase__ : Optional[Any]=None , lowercase__ : Tuple=None , lowercase__ : Optional[int]=-1 , lowercase__ : Optional[int]=False , ):
_lowerCAmelCase = self.num_layers
try:
_lowerCAmelCase = self.bert(
lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , position_ids=lowercase__ , head_mask=lowercase__ , inputs_embeds=lowercase__ , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
_lowerCAmelCase = outputs[1]
_lowerCAmelCase = self.dropout(lowercase__ )
_lowerCAmelCase = self.classifier(lowercase__ )
_lowerCAmelCase = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
_lowerCAmelCase = e.message
_lowerCAmelCase = e.exit_layer
_lowerCAmelCase = outputs[0]
if not self.training:
_lowerCAmelCase = entropy(lowercase__ )
_lowerCAmelCase = []
_lowerCAmelCase = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
_lowerCAmelCase = MSELoss()
_lowerCAmelCase = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
_lowerCAmelCase = CrossEntropyLoss()
_lowerCAmelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
_lowerCAmelCase = []
for highway_exit in outputs[-1]:
_lowerCAmelCase = highway_exit[0]
if not self.training:
highway_logits_all.append(lowercase__ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
_lowerCAmelCase = MSELoss()
_lowerCAmelCase = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
_lowerCAmelCase = CrossEntropyLoss()
_lowerCAmelCase = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(lowercase__ )
if train_highway:
_lowerCAmelCase = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
_lowerCAmelCase = (loss,) + outputs
if not self.training:
_lowerCAmelCase = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
_lowerCAmelCase = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 225 | 1 |
def solution(n: int = 1_0_0_0_0_0_0) -> int:
    """Project Euler 14: starting number below `n` that produces the longest Collatz chain."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}  # memoised chain lengths
    for input1 in range(2, n):
        counter = 0
        number = input1
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if input1 not in counters:
            counters[input1] = counter
        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
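# Memoising `counters` is what keeps this tractable: each chain stops at the first
# previously-seen value. The well-known Project Euler 14 answer below one million
# is 837799, so `solution()` returns 837799 (in a few seconds of pure Python).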
| 47 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline(DiffusionPipeline):
    '''simple docstring'''

    def __init__(self, unet, scheduler) -> Any:
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate
        sample_size = audio_length_in_s * self.unet.config.sample_rate
        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f'{audio_length_in_s} is too small. Make sure it\'s bigger or equal to'
                f' {3 * down_scale_factor / self.unet.config.sample_rate}.'
            )
        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f'{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'
                f' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'
                ' process.'
            )
        sample_size = int(sample_size)
        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f'You have passed a list of generators of length {len(generator)}, but requested an effective batch'
                f' size of {batch_size}. Make sure the batch size matches the length of the generators.'
            )
        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)
        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample
            # 2. compute previous image: x_t -> t_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample
        audio = audio.clamp(-1, 1).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]
        if not return_dict:
            return (audio,)
        return AudioPipelineOutput(audios=audio)
| 573 | 0 |
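A hedged usage sketch for a pipeline like the one above; the checkpoint id is an assumption for illustration, not something taken from the code:

from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("harmonai/maestro-150k")  # assumed checkpoint id
output = pipe(batch_size=1, num_inference_steps=100)
audio = output.audios  # numpy array of shape (batch, channels, samples), in [-1, 1]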
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: '''Sunday''',
    1: '''Monday''',
    2: '''Tuesday''',
    3: '''Wednesday''',
    4: '''Thursday''',
    5: '''Friday''',
    6: '''Saturday''',
}
def get_week_day(year: int, month: int, day: int) -> str:
    '''simple docstring'''
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        # common year: not divisible by 4, or a century year not divisible by 400
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 32 |
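Two spot checks for the function above; the second exercises the year-2000 leap-day path that the corrected divisible-by-400 condition handles:

assert get_week_day(2023, 1, 1) == "Sunday"
assert get_week_day(2000, 2, 29) == "Tuesday"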
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class _lowercase ( TestCase ):
    def _no_encoding_on_file_open( self , filepath : str ):
        """simple docstring"""
        with open(filepath , encoding="utf-8" ) as input_file:
            regexp = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
            input_text = input_file.read()
            match = regexp.search(input_text )
            return match

    def _no_print_statements( self , filepath : str ):
        """simple docstring"""
        with open(filepath , encoding="utf-8" ) as input_file:
            regexp = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL )
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text )
            matches = [match for match in matches if match is not None and match.group(1 ) is not None]
            return matches[0] if matches else None

    def test_no_encoding_on_file_open( self ):
        """simple docstring"""
        dataset_paths = Path("./datasets" )
        dataset_files = list(dataset_paths.absolute().glob("**/*.py" ) )
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset ) ):
                raise AssertionError(f"""open(...) must use utf-8 encoding in {dataset}""" )

    def test_no_print_statements( self ):
        """simple docstring"""
        dataset_paths = Path("./datasets" )
        dataset_files = list(dataset_paths.absolute().glob("**/*.py" ) )
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset ) ):
                raise AssertionError(f"""print statement found in {dataset}. Use datasets.logger/logging instead.""" )
| 32 | 1 |
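A quick, self-contained sanity check of the open()-without-encoding pattern used above:

import re

regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
assert regexp.search(' open("data.txt")') is not None                  # flagged: no encoding
assert regexp.search(' open("data.txt", encoding="utf-8")') is None    # accepted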
'''simple docstring'''
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
TOKENIZER_CLASSES = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast( tokenizer_name , checkpoint_name , dump_path , force_download ):
    """simple docstring"""
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f'Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.' )
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers , tokenizer_name + "Fast" )}
    logger.info(f'Loading tokenizer classes: {tokenizer_names}' )
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys() )
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(f'For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}' )
        for checkpoint in checkpoint_names:
            logger.info(f'Loading {tokenizer_class.__class__.__name__} {checkpoint}' )
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint , force_download=force_download )
            # Save fast tokenizer
            logger.info(f'Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}' )
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory , checkpoint_prefix_name = checkpoint.split("/" )
                dump_path_full = os.path.join(dump_path , checkpoint_directory )
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(f'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
                next_char = file_path.split(checkpoint )[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full , checkpoint_prefix_name )
                    checkpoint_prefix_name = None
                logger.info(f'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
            file_names = tokenizer.save_pretrained(
                dump_path_full , legacy_format=False , filename_prefix=checkpoint_prefix_name )
            logger.info(f'=> File names {file_names}' )
            for file_name in file_names:
                if not file_name.endswith("tokenizer.json" ):
                    os.remove(file_name )
                    logger.info(f'=> removing {file_name}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 211 |
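The converter can also be driven programmatically rather than through argparse; an illustrative call (the tokenizer and checkpoint names are example values, and this downloads the checkpoint):

convert_slow_checkpoint_to_fast(
    tokenizer_name="BertTokenizer",
    checkpoint_name="bert-base-uncased",
    dump_path="./fast_tokenizers",
    force_download=False,
)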
'''simple docstring'''
def twos_complement(number: int) -> str:
    """simple docstring"""
    if number > 0:
        raise ValueError("input must be a negative integer" )
    binary_number_length = len(bin(number )[3:] )
    twos_complement_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number ))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 211 | 1 |
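Example outputs for the function above, using the smallest bit width that represents each value:

print(twos_complement(-5))  # 0b1011  (4-bit two's complement of -5)
print(twos_complement(-1))  # 0b11
print(twos_complement(0))   # 0b0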
'''simple docstring'''
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    first_str_length = len(first_str )
    second_str_length = len(second_str )
    abs_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list = []
    for char_count in range(abs_length ):
        if char_count < first_str_length:
            output_list.append(first_str[char_count] )
        if char_count < second_str_length:
            output_list.append(second_str[char_count] )
    return "".join(output_list )
if __name__ == "__main__":
print(alternative_string_arrange("AB", "XYZ"), end=" ")
| 490 |
'''simple docstring'''
def nand_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(0 ) != 0 )


def test_nand_gate() -> None:
    assert nand_gate(0 , 0 ) == 1
    assert nand_gate(0 , 1 ) == 1
    assert nand_gate(1 , 0 ) == 1
    assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 490 | 1 |
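NAND is functionally complete, so the other basic gates can be built from nand_gate alone; a small sketch:

def not_gate(a: int) -> int:
    return nand_gate(a, a)

def and_gate(a: int, b: int) -> int:
    return not_gate(nand_gate(a, b))

def or_gate(a: int, b: int) -> int:
    return nand_gate(not_gate(a), not_gate(b))

assert and_gate(1, 1) == 1 and and_gate(1, 0) == 0
assert or_gate(0, 0) == 0 and or_gate(1, 0) == 1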
'''simple docstring'''
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    '''simple docstring'''
    create_state_space_tree(sequence , [] , 0 , [0 for i in range(len(sequence ) )] )
def create_state_space_tree(sequence: list[int | str], current_sequence: list[int | str], index: int, index_used: list[int], ) -> None:
    '''simple docstring'''
    if index == len(sequence ):
        print(current_sequence )
        return
    for i in range(len(sequence ) ):
        if not index_used[i]:
            current_sequence.append(sequence[i] )
            index_used[i] = True
            create_state_space_tree(sequence , current_sequence , index + 1 , index_used )
            current_sequence.pop()
            index_used[i] = False
sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
sequence_2: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_2)
| 135 |
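For comparison, the standard library enumerates the same n! orderings; a quick cross-check against the backtracking version above:

from itertools import permutations

assert len(list(permutations([3, 1, 2, 4]))) == 24  # 4! orderings, the same set the tree prints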
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class __UpperCamelCase :
def __init__( self : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any]=13 , lowerCAmelCase : Optional[int]=2 , lowerCAmelCase : Dict=24 , lowerCAmelCase : Dict=16 , lowerCAmelCase : Union[str, Any]=True , lowerCAmelCase : Union[str, Any]=True , lowerCAmelCase : Tuple=32 , lowerCAmelCase : Optional[Any]=5 , lowerCAmelCase : Any=4 , lowerCAmelCase : Tuple=37 , lowerCAmelCase : str="gelu" , lowerCAmelCase : List[Any]=0.1 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : int=10 , lowerCAmelCase : str=0.02 , lowerCAmelCase : Any=None , lowerCAmelCase : List[str]=2 , lowerCAmelCase : str=2 , ):
'''simple docstring'''
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = max_length
UpperCAmelCase_ = num_mel_bins
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = scope
UpperCAmelCase_ = frequency_stride
UpperCAmelCase_ = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
UpperCAmelCase_ = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
UpperCAmelCase_ = (self.max_length - self.patch_size) // self.time_stride + 1
UpperCAmelCase_ = frequency_out_dimension * time_out_dimension
UpperCAmelCase_ = num_patches + 2
def __A ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ = self.get_config()
return config, input_values, labels
def __A ( self : List[str] ):
'''simple docstring'''
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def __A ( self : List[Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ = ASTModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase_ = model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ = self.prepare_config_and_inputs()
(
(
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) ,
) = config_and_inputs
UpperCAmelCase_ = {"input_values": input_values}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( lowercase , lowercase , unittest.TestCase ):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def __A ( self : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : List[str] , lowerCAmelCase : int ):
'''simple docstring'''
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def __A ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ = ASTModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=37 )
def __A ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="AST does not use inputs_embeds" )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
pass
def __A ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase , nn.Linear ) )
def __A ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(lowerCAmelCase )
UpperCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ["input_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def __A ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
@slow
def __A ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = ASTModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
def __lowerCAmelCase ( ):
UpperCAmelCase_ = hf_hub_download(
repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" )
UpperCAmelCase_ , UpperCAmelCase_ = torchaudio.load(A )
return audio, sampling_rate
@require_torch
@require_torchaudio
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def __A ( self : Optional[Any] ):
'''simple docstring'''
return (
ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" )
if is_torchaudio_available()
else None
)
@slow
def __A ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ = self.default_feature_extractor
UpperCAmelCase_ = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" ).to(lowerCAmelCase )
UpperCAmelCase_ = self.default_feature_extractor
UpperCAmelCase_ , UpperCAmelCase_ = prepare_audio()
UpperCAmelCase_ = audio.squeeze().numpy()
UpperCAmelCase_ = feature_extractor(lowerCAmelCase , sampling_rate=lowerCAmelCase , return_tensors="pt" ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(**lowerCAmelCase )
# verify the logits
UpperCAmelCase_ = torch.Size((1, 527) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
UpperCAmelCase_ = torch.tensor([-0.8_760, -7.0_042, -8.6_602] ).to(lowerCAmelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1e-4 ) )
| 162 | 0 |
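A hedged sketch of running the same public checkpoint outside the test harness; the silent one-second waveform is a placeholder for real 16 kHz audio:

import numpy as np
import torch
from transformers import ASTFeatureExtractor, ASTForAudioClassification

extractor = ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
waveform = np.zeros(16_000, dtype=np.float32)  # placeholder audio
inputs = extractor(waveform, sampling_rate=16_000, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 527): AudioSet classes
print(model.config.id2label[int(logits.argmax(-1))])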
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''adapter_layer''': '''encoder.layers.*.adapter_layer''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
'''pooling_layer.linear''': '''projector''',
'''pooling_layer.projection''': '''classifier''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''projector''',
'''classifier''',
]
def lowercase__ ( lowerCAmelCase__ : List[Any] ) -> str:
'''simple docstring'''
a__ : List[str] = {}
with open(lowerCAmelCase__ , "r" ) as file:
for line_number, line in enumerate(lowerCAmelCase__ ):
a__ : Tuple = line.strip()
if line:
a__ : List[Any] = line.split()
a__ : Any = line_number
a__ : Tuple = words[0]
a__ : Optional[Any] = value
return result
def lowercase__ ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : int , lowerCAmelCase__ : Tuple ) -> Optional[int]:
'''simple docstring'''
for attribute in key.split("." ):
a__ : str = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
a__ : Union[str, Any] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowerCAmelCase__ ):
a__ : Optional[Any] = PARAM_MAPPING[full_name.split("." )[-1]]
a__ : Optional[int] = "param"
if weight_type is not None and weight_type != "param":
a__ : Any = getattr(lowerCAmelCase__ , lowerCAmelCase__ ).shape
elif weight_type is not None and weight_type == "param":
a__ : int = hf_pointer
for attribute in hf_param_name.split("." ):
a__ : Optional[Any] = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
a__ : Tuple = shape_pointer.shape
# let's reduce dimension
a__ : Dict = value[0]
else:
a__ : Any = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}" )
if weight_type == "weight":
a__ : Optional[int] = value
elif weight_type == "weight_g":
a__ : Optional[Any] = value
elif weight_type == "weight_v":
a__ : str = value
elif weight_type == "bias":
a__ : List[str] = value
elif weight_type == "param":
for attribute in hf_param_name.split("." ):
a__ : Optional[int] = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
a__ : List[Any] = value
else:
a__ : Any = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def lowercase__ ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Dict ) -> List[str]:
'''simple docstring'''
a__ : str = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowerCAmelCase__ ):
a__ : str = PARAM_MAPPING[full_name.split("." )[-1]]
a__ : Optional[Any] = "param"
if weight_type is not None and weight_type != "param":
a__ : Optional[int] = ".".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
a__ : Dict = ".".join([key, hf_param_name] )
else:
a__ : Union[str, Any] = key
a__ : Optional[Any] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
'''W_a''': '''linear_1.weight''',
'''W_b''': '''linear_2.weight''',
'''b_a''': '''linear_1.bias''',
'''b_b''': '''linear_2.bias''',
'''ln_W''': '''norm.weight''',
'''ln_b''': '''norm.bias''',
}
def lowercase__ ( lowerCAmelCase__ : str , lowerCAmelCase__ : str , lowerCAmelCase__ : Union[str, Any]=None , lowerCAmelCase__ : Dict=None ) -> List[Any]:
'''simple docstring'''
a__ : str = False
for key, mapped_key in MAPPING.items():
a__ : List[str] = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
a__ : str = True
if "*" in mapped_key:
a__ : List[Any] = name.split(lowerCAmelCase__ )[0].split("." )[-2]
a__ : int = mapped_key.replace("*" , lowerCAmelCase__ )
if "weight_g" in name:
a__ : str = "weight_g"
elif "weight_v" in name:
a__ : str = "weight_v"
elif "bias" in name:
a__ : Dict = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
a__ : int = "weight"
else:
a__ : Tuple = None
if hf_dict is not None:
rename_dict(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
else:
set_recursively(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return is_used
return is_used
def lowercase__ ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
a__ : Optional[int] = []
a__ : List[str] = fairseq_model.state_dict()
a__ : List[str] = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
a__ : Dict = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , hf_model.config.feat_extract_norm == "group" , )
a__ : Tuple = True
else:
a__ : List[Any] = load_wavaveca_layer(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if not is_used:
unused_weights.append(lowerCAmelCase__ )
logger.warning(F"Unused weights: {unused_weights}" )
def lowercase__ ( lowerCAmelCase__ : Any , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : Dict ) -> List[Any]:
'''simple docstring'''
a__ : Union[str, Any] = full_name.split("conv_layers." )[-1]
a__ : int = name.split("." )
a__ : Optional[Any] = int(items[0] )
a__ : Optional[int] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
a__ : Tuple = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
a__ : Optional[int] = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
a__ : List[Any] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
a__ : Union[str, Any] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(lowerCAmelCase__ )
@torch.no_grad()
def lowercase__ ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any=None , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : str=True , lowerCAmelCase__ : Dict=False ) -> Union[str, Any]:
'''simple docstring'''
if config_path is not None:
a__ : str = WavaVecaConfig.from_pretrained(lowerCAmelCase__ )
else:
a__ : Any = WavaVecaConfig()
if is_seq_class:
a__ : Union[str, Any] = read_txt_into_dict(lowerCAmelCase__ )
a__ : Optional[int] = idalabel
a__ : Any = WavaVecaForSequenceClassification(lowerCAmelCase__ )
a__ : Tuple = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , )
feature_extractor.save_pretrained(lowerCAmelCase__ )
elif is_finetuned:
if dict_path:
a__ : List[str] = Dictionary.load(lowerCAmelCase__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
a__ : Union[str, Any] = target_dict.pad_index
a__ : Optional[Any] = target_dict.bos_index
a__ : Tuple = target_dict.eos_index
a__ : Tuple = len(target_dict.symbols )
a__ : str = os.path.join(lowerCAmelCase__ , "vocab.json" )
if not os.path.isdir(lowerCAmelCase__ ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(lowerCAmelCase__ ) )
return
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
a__ : List[str] = target_dict.indices
# fairseq has the <pad> and <s> switched
a__ : Dict = 0
a__ : Dict = 1
with open(lowerCAmelCase__ , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
a__ : Tuple = WavaVecaCTCTokenizer(
lowerCAmelCase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=lowerCAmelCase__ , )
a__ : int = True if config.feat_extract_norm == "layer" else False
a__ : Optional[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , )
a__ : List[Any] = WavaVecaProcessor(feature_extractor=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ )
processor.save_pretrained(lowerCAmelCase__ )
a__ : List[str] = WavaVecaForCTC(lowerCAmelCase__ )
else:
a__ : Union[str, Any] = WavaVecaForPreTraining(lowerCAmelCase__ )
if is_finetuned or is_seq_class:
a__ , a__ , a__ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
a__ : Optional[Any] = argparse.Namespace(task="audio_pretraining" )
a__ : Tuple = fairseq.tasks.setup_task(lowerCAmelCase__ )
a__ , a__ , a__ : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowerCAmelCase__ )
a__ : Union[str, Any] = model[0].eval()
recursively_load_weights(lowerCAmelCase__ , lowerCAmelCase__ , not is_finetuned )
hf_wavavec.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
parser.add_argument(
'''--is_seq_class''',
action='''store_true''',
help='''Whether the model to convert is a fine-tuned sequence classification model or not''',
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 251 |
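The conversion above is, at its core, a key-renaming pass from fairseq parameter names to Hugging Face names; a minimal sketch of that idea with a one-entry toy mapping:

TOY_MAPPING = {"post_extract_proj": "feature_projection.projection"}

def remap_key(fairseq_key: str) -> str:
    # replace the first matching fairseq substring with its Hugging Face counterpart
    for old, new in TOY_MAPPING.items():
        if old in fairseq_key:
            return fairseq_key.replace(old, new)
    return fairseq_key

assert remap_key("post_extract_proj.weight") == "feature_projection.projection.weight"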
"""simple docstring"""
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __UpperCAmelCase ( _UpperCamelCase , unittest.TestCase ):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
super().setUp()
# fmt: off
a__ : Tuple = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
a__ : str = dict(zip(a_ , range(len(a_ ) ) ) )
a__ : Optional[int] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
a__ : Union[str, Any] = {"unk_token": "<unk>"}
a__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
a__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(a_ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(a_ ) )
def UpperCAmelCase ( self : Optional[Any] , **a_ : Tuple ) -> Any:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **a_ )
def UpperCAmelCase ( self : Tuple , **a_ : Any ) -> Optional[int]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **a_ )
def UpperCAmelCase ( self : Tuple , a_ : Dict ) -> Tuple:
'''simple docstring'''
a__ : Optional[int] = "lower newer"
a__ : Dict = "lower newer"
return input_text, output_text
def UpperCAmelCase ( self : Any ) -> List[Any]:
'''simple docstring'''
a__ : List[str] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
a__ : Optional[Any] = "lower newer"
a__ : Tuple = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
a__ : Tuple = tokenizer.tokenize(a_ )
self.assertListEqual(a_ , a_ )
a__ : List[str] = tokens + [tokenizer.unk_token]
a__ : str = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ) , a_ )
@require_ftfy
def UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
a__ : Dict = self.tokenizer_class.from_pretrained(a_ , **a_ )
a__ : Any = self.rust_tokenizer_class.from_pretrained(a_ , **a_ )
a__ : Optional[int] = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
a__ : str = tokenizer_s.tokenize(a_ )
a__ : int = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_ , a_ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
a__ : Dict = "xa\u0303y" + " " + "x\xe3y"
a__ : Any = tokenizer_s.tokenize(a_ )
a__ : Optional[int] = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_ , a_ )
# Test that the tokenization is identical on unicode of space type
a__ : str = [
"\u0009", # (horizontal tab, '\t')
"\u000B", # (vertical tab)
"\u000C", # (form feed)
"\u0020", # (space, ' ')
"\u200E", # (left-to-right mark):w
"\u200F", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
a__ : str = tokenizer_s.tokenize(a_ )
a__ : List[Any] = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_ , a_ )
# Test that the tokenization is identical on unicode of line break type
a__ : int = [
"\u000A", # (line feed, '\n')
"\r\n", # (carriage return and line feed, '\r\n')
"\u000D", # (carriage return, '\r')
"\r", # (carriage return, '\r')
"\u000D", # (carriage return, '\r')
"\u2028", # (line separator)
"\u2029", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
a__ : Any = tokenizer_s.tokenize(a_ )
a__ : Dict = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_ , a_ )
def UpperCAmelCase ( self : str ) -> Union[str, Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
a__ : Union[str, Any] = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
a__ : Union[str, Any] = F"{text_of_1_token} {text_of_1_token}"
a__ : List[str] = self.rust_tokenizer_class.from_pretrained(
a_ , use_fast=a_ , )
a__ : List[Any] = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a_ ) + 1, len(a_ ) + 1 + len(a_ )) , )
a__ : List[Any] = F" {text}"
a__ : List[Any] = self.rust_tokenizer_class.from_pretrained(
a_ , use_fast=a_ , )
a__ : Tuple = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(a_ ) + 1, 1 + len(a_ ) + 1 + len(a_ )) , )
def UpperCAmelCase ( self : List[str] ) -> List[str]:
'''simple docstring'''
with self.assertRaises(a_ ) as context:
self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" )
self.assertTrue(
context.exception.args[0].startswith(
"The `backend_tokenizer` provided does not match the expected format." ) )
@require_ftfy
def UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
super().test_tokenization_python_rust_equals()
def UpperCAmelCase ( self : Dict ) -> str:
'''simple docstring'''
        pass
| 251 | 1 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True )
class AudioClassification( TaskTemplate ):
    """simple docstring"""

    task: str = field(default='audio-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
    input_schema: ClassVar[Features] = Features({'audio': Audio()} )
    label_schema: ClassVar[Features] = Features({'labels': ClassLabel} )
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features( self , features ):
        '''simple docstring'''
        if self.label_column not in features:
            raise ValueError(f"""Column {self.label_column} is not present in features.""" )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""" )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # the upstream implementation carries the aligned schema via __annotations__,
        # since the dataclass is frozen
        task_template.__annotations__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping( self ) -> Dict[str, str]:
        '''simple docstring'''
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
| 98 |
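A hedged usage sketch: aligning the template above with a concrete dataset schema so that the label column carries the dataset's class names:

from datasets import Audio, ClassLabel, Features

features = Features({"audio": Audio(), "labels": ClassLabel(names=["dog", "cat"])})
template = AudioClassification()
aligned = template.align_with_features(features)  # label_schema now uses the two-class ClassLabel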
'''simple docstring'''
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """simple docstring"""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b , a % b )
    k = a // b
    return (y, x - k * y)
def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """simple docstring"""
    (x, y) = extended_euclid(n1 , n2 )
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
def invert_modulo(a: int, n: int) -> int:
    """simple docstring"""
    (b, x) = extended_euclid(a , n )
    if b < 0:
        b = (b % n + n) % n
    return b
def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """simple docstring"""
    x , y = invert_modulo(n1 , n2 ), invert_modulo(n2 , n1 )
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name='''chinese_remainder_theorem''', verbose=True)
testmod(name='''chinese_remainder_theorem2''', verbose=True)
testmod(name='''invert_modulo''', verbose=True)
testmod(name='''extended_euclid''', verbose=True)
| 41 | 0 |
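A worked example: the system x ≡ 1 (mod 5), x ≡ 3 (mod 7) has the unique solution 31 modulo 35 (31 % 5 == 1 and 31 % 7 == 3), and both implementations agree:

assert chinese_remainder_theorem(5, 1, 7, 3) == 31
assert chinese_remainder_theorem2(5, 1, 7, 3) == 31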
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __snake_case ( _lowercase):
    scheduler_classes = (UnCLIPScheduler,)
def SCREAMING_SNAKE_CASE ( self : Tuple , **__lowerCAmelCase : int ):
"""simple docstring"""
        config = {
'''num_train_timesteps''': 1_0_0_0,
'''variance_type''': '''fixed_small_log''',
'''clip_sample''': True,
'''clip_sample_range''': 1.0,
'''prediction_type''': '''epsilon''',
}
config.update(**__lowerCAmelCase )
return config
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
for clip_sample_range in [1, 5, 1_0, 2_0]:
self.check_over_configs(clip_sample_range=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
for time_step in [0, 5_0_0, 9_9_9]:
for prev_timestep in [None, 5, 1_0_0, 2_5_0, 5_0_0, 7_5_0]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=__lowerCAmelCase , prev_timestep=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Tuple = self.scheduler_classes[0]
_lowerCamelCase : Union[str, Any] = self.get_scheduler_config(variance_type='''fixed_small_log''' )
_lowerCamelCase : Dict = scheduler_class(**__lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.0_54_96_25 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.9_99_49_87 ) ) < 1E-5
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : str = self.scheduler_classes[0]
_lowerCamelCase : Optional[Any] = self.get_scheduler_config(variance_type='''learned_range''' )
_lowerCamelCase : Optional[int] = scheduler_class(**__lowerCAmelCase )
_lowerCamelCase : Optional[int] = 0.5
assert scheduler._get_variance(1 , predicted_variance=__lowerCAmelCase ) - -10.1_71_27_90 < 1E-5
assert scheduler._get_variance(4_8_7 , predicted_variance=__lowerCAmelCase ) - -5.7_99_80_52 < 1E-5
assert scheduler._get_variance(9_9_9 , predicted_variance=__lowerCAmelCase ) - -0.0_01_00_11 < 1E-5
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = self.scheduler_classes[0]
_lowerCamelCase : Dict = self.get_scheduler_config()
_lowerCamelCase : int = scheduler_class(**__lowerCAmelCase )
_lowerCamelCase : Optional[int] = scheduler.timesteps
_lowerCamelCase : Tuple = self.dummy_model()
_lowerCamelCase : List[Any] = self.dummy_sample_deter
_lowerCamelCase : Optional[int] = torch.manual_seed(0 )
for i, t in enumerate(__lowerCAmelCase ):
# 1. predict noise residual
_lowerCamelCase : Optional[int] = model(__lowerCAmelCase , __lowerCAmelCase )
# 2. predict previous mean of sample x_t-1
_lowerCamelCase : List[str] = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase ).prev_sample
_lowerCamelCase : Optional[int] = pred_prev_sample
_lowerCamelCase : str = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : Union[str, Any] = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 2_5_2.2_6_8_2_4_9_5 ) < 1E-2
assert abs(result_mean.item() - 0.3_28_47_43 ) < 1E-3
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Any = self.scheduler_classes[0]
_lowerCamelCase : List[Any] = self.get_scheduler_config()
_lowerCamelCase : str = scheduler_class(**__lowerCAmelCase )
scheduler.set_timesteps(2_5 )
_lowerCamelCase : Union[str, Any] = scheduler.timesteps
_lowerCamelCase : str = self.dummy_model()
_lowerCamelCase : str = self.dummy_sample_deter
_lowerCamelCase : Any = torch.manual_seed(0 )
for i, t in enumerate(__lowerCAmelCase ):
# 1. predict noise residual
_lowerCamelCase : Optional[Any] = model(__lowerCAmelCase , __lowerCAmelCase )
if i + 1 == timesteps.shape[0]:
_lowerCamelCase : Optional[int] = None
else:
_lowerCamelCase : str = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
_lowerCamelCase : Optional[Any] = scheduler.step(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , prev_timestep=__lowerCAmelCase , generator=__lowerCAmelCase ).prev_sample
_lowerCamelCase : Optional[Any] = pred_prev_sample
_lowerCamelCase : Optional[int] = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : Tuple = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 2_5_8.2_0_4_4_9_8_3 ) < 1E-2
assert abs(result_mean.item() - 0.3_36_20_38 ) < 1E-3
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
pass
| 709 |
"""simple docstring"""
edges = {'''a''': ['''c''', '''b'''], '''b''': ['''d''', '''e'''], '''c''': [], '''d''': [], '''e''': []}
vertices = ['''a''', '''b''', '''c''', '''d''', '''e''']
def topological_sort(start: str, visited: list[str], sort: list[str]) -> list[str]:
    '''simple docstring'''
    current = start
    # add current to visited
    visited.append(current )
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor , visited , sort )
    # if all neighbors visited add current to sort
    sort.append(current )
    # if all vertices haven't been visited select a new one to visit
    if len(visited ) != len(vertices ):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice , visited , sort )
    # return sort
    return sort
if __name__ == "__main__":
    sort = topological_sort('''a''', [], [])
print(sort)
| 598 | 0 |
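Because each vertex is appended only after all of its neighbors, every edge u -> v places v before u in the result; a quick check of that invariant for the sample graph:

order = topological_sort("a", [], [])
for u, targets in edges.items():
    for v in targets:
        assert order.index(v) < order.index(u)  # dependencies come first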
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative" )
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative" )
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 413 |
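Example: a 10 mH inductor with a 100 µF capacitor resonates near 159 Hz, from f = 1 / (2 * pi * sqrt(L * C)):

label, freq = resonant_frequency(inductance=10e-3, capacitance=100e-6)
print(label, round(freq, 2))  # Resonant frequency 159.15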
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() )
def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}
    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue
        key = key.replace('heads.cmd.mim_head.cls.predictions' , 'mmm_image_head' )
        key = key.replace('heads.cmd.mlm_head.cls.predictions' , 'mmm_text_head' )
        key = key.replace('heads.cmd.itm_head.cls' , 'itm_head' )
        key = key.replace('heads.cmd.itm_head.pooler' , 'itm_head.pooler' )
        key = key.replace('heads.cmd.clip_head.logit_scale' , 'flava.logit_scale' )
        key = key.replace('heads.fairseq_mlm.cls.predictions' , 'mlm_head' )
        key = key.replace('heads.imagenet.mim_head.cls.predictions' , 'mim_head' )
        key = key.replace('mm_text_projection' , 'flava.text_to_mm_projection' )
        key = key.replace('mm_image_projection' , 'flava.image_to_mm_projection' )
        key = key.replace('image_encoder.module' , 'flava.image_model' )
        key = key.replace('text_encoder.module' , 'flava.text_model' )
        key = key.replace('mm_encoder.module.encoder.cls_token' , 'flava.multimodal_model.cls_token' )
        key = key.replace('mm_encoder.module' , 'flava.multimodal_model' )
        key = key.replace('text_projection' , 'flava.text_projection' )
        key = key.replace('image_projection' , 'flava.image_projection' )
        upgrade[key] = value.float()
    for key, value in codebook_state_dict.items():
        upgrade[f'image_codebook.{key}'] = value
    return upgrade
@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path )
    else:
        config = FlavaConfig()
    hf_model = FlavaForPreTraining(config ).eval()
    codebook_state_dict = convert_dalle_checkpoint(codebook_path , None , save_checkpoint=False )
    if os.path.exists(checkpoint_path ):
        state_dict = torch.load(checkpoint_path , map_location='cpu' )
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path , map_location='cpu' )
    hf_state_dict = upgrade_state_dict(state_dict , codebook_state_dict )
    hf_model.load_state_dict(hf_state_dict )
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict )
    state_dict_count = count_parameters(state_dict ) + count_parameters(codebook_state_dict )
    assert torch.allclose(hf_count , state_dict_count , atol=1E-3 )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 447 | 0 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def lowerCAmelCase ( self : List[Any] )-> Dict:
snake_case = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""pt""" )
# Using `do_sample=False` to force deterministic output
snake_case = text_generator("""This is a test""" , do_sample=__snake_case )
self.assertEqual(
__snake_case , [
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
] , )
snake_case = text_generator(["""This is a test""", """This is a second test"""] )
self.assertEqual(
__snake_case , [
[
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"""
""" oscope. oscope. FiliFili@@"""
)
}
],
] , )
snake_case = text_generator("""This is a test""" , do_sample=__snake_case , num_return_sequences=2 , return_tensors=__snake_case )
self.assertEqual(
__snake_case , [
{"""generated_token_ids""": ANY(__snake_case )},
{"""generated_token_ids""": ANY(__snake_case )},
] , )
snake_case = text_generator.model.config.eos_token_id
snake_case = """<pad>"""
snake_case = text_generator(
["""This is a test""", """This is a second test"""] , do_sample=__snake_case , num_return_sequences=2 , batch_size=2 , return_tensors=__snake_case , )
self.assertEqual(
__snake_case , [
[
{"""generated_token_ids""": ANY(__snake_case )},
{"""generated_token_ids""": ANY(__snake_case )},
],
[
{"""generated_token_ids""": ANY(__snake_case )},
{"""generated_token_ids""": ANY(__snake_case )},
],
] , )
@require_tf
def lowerCAmelCase ( self : Dict )-> Tuple:
snake_case = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""tf""" )
# Using `do_sample=False` to force deterministic output
snake_case = text_generator("""This is a test""" , do_sample=__snake_case )
self.assertEqual(
__snake_case , [
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
] , )
snake_case = text_generator(["""This is a test""", """This is a second test"""] , do_sample=__snake_case )
self.assertEqual(
__snake_case , [
[
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"""
""" Cannes 閲閲Cannes Cannes Cannes 攵 please,"""
)
}
],
] , )
def lowerCAmelCase ( self : str , __snake_case : List[Any] , __snake_case : List[str] , __snake_case : List[str] )-> int:
snake_case = TextGenerationPipeline(model=__snake_case , tokenizer=__snake_case )
return text_generator, ["This is a test", "Another test"]
def lowerCAmelCase ( self : int )-> Dict:
snake_case = """Hello I believe in"""
snake_case = pipeline("""text-generation""" , model="""hf-internal-testing/tiny-random-gpt2""" )
snake_case = text_generator(__snake_case )
self.assertEqual(
__snake_case , [{"""generated_text""": """Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"""}] , )
snake_case = text_generator(__snake_case , stop_sequence=""" fe""" )
self.assertEqual(__snake_case , [{"""generated_text""": """Hello I believe in fe"""}] )
def lowerCAmelCase ( self : Optional[int] , __snake_case : Tuple , __snake_case : int )-> List[Any]:
snake_case = text_generator.model
snake_case = text_generator.tokenizer
snake_case = text_generator("""This is a test""" )
self.assertEqual(__snake_case , [{"""generated_text""": ANY(__snake_case )}] )
self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) )
snake_case = text_generator("""This is a test""" , return_full_text=__snake_case )
self.assertEqual(__snake_case , [{"""generated_text""": ANY(__snake_case )}] )
self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] )
snake_case = pipeline(task="""text-generation""" , model=__snake_case , tokenizer=__snake_case , return_full_text=__snake_case )
snake_case = text_generator("""This is a test""" )
self.assertEqual(__snake_case , [{"""generated_text""": ANY(__snake_case )}] )
self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] )
snake_case = text_generator("""This is a test""" , return_full_text=__snake_case )
self.assertEqual(__snake_case , [{"""generated_text""": ANY(__snake_case )}] )
self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) )
snake_case = text_generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=__snake_case )
self.assertEqual(
__snake_case , [
[{"""generated_text""": ANY(__snake_case )}, {"""generated_text""": ANY(__snake_case )}],
[{"""generated_text""": ANY(__snake_case )}, {"""generated_text""": ANY(__snake_case )}],
] , )
if text_generator.tokenizer.pad_token is not None:
snake_case = text_generator(
["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=__snake_case )
self.assertEqual(
__snake_case , [
[{"""generated_text""": ANY(__snake_case )}, {"""generated_text""": ANY(__snake_case )}],
[{"""generated_text""": ANY(__snake_case )}, {"""generated_text""": ANY(__snake_case )}],
] , )
with self.assertRaises(__snake_case ):
snake_case = text_generator("""test""" , return_full_text=__snake_case , return_text=__snake_case )
with self.assertRaises(__snake_case ):
snake_case = text_generator("""test""" , return_full_text=__snake_case , return_tensors=__snake_case )
with self.assertRaises(__snake_case ):
snake_case = text_generator("""test""" , return_text=__snake_case , return_tensors=__snake_case )
    # Empty prompt is slightly special:
    # it requires a BOS token to exist.
    # Special case for Pegasus which will always append EOS so will
    # work even without BOS.
    if (
        text_generator.tokenizer.bos_token_id is not None
        or "Pegasus" in tokenizer.__class__.__name__
        or "Git" in model.__class__.__name__
    ):
        outputs = text_generator("")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
    else:
        with self.assertRaises((ValueError, AssertionError)):
            outputs = text_generator("")
    if text_generator.framework == "tf":
        # TF generation does not support max_new_tokens, and it's impossible
        # to control long generation with only max_length without
        # fancy calculation, dismissing tests for now.
        return

    # We don't care about infinite range models.
    # They already work.
    # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
    EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
    if (
        tokenizer.model_max_length < 10_000
        and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
    ):
        # Handling of large generations
        with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
            text_generator("This is a test" * 500, max_new_tokens=20)
        outputs = text_generator("This is a test" * 500, handle_long_generation="hole", max_new_tokens=20)

        # Hole strategy cannot work
        with self.assertRaises(ValueError):
            text_generator(
                "This is a test" * 500,
                handle_long_generation="hole",
                max_new_tokens=tokenizer.model_max_length + 10,
            )
@require_torch
@require_accelerate
@require_torch_gpu
def test_small_model_pt_bloom_accelerate(self):
    import torch

    # Classic `model_kwargs`
    pipe = pipeline(
        model="hf-internal-testing/tiny-random-bloom",
        model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloat16},
    )
    self.assertEqual(pipe.model.device, torch.device(0))
    self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
    out = pipe("This is a test")
    self.assertEqual(
        out,
        [
            {
                "generated_text": (
                    "This is a test test test test test test test test test test test test test test test test"
                    " test"
                )
            }
        ],
    )

    # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
    pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.bfloat16)
    self.assertEqual(pipe.model.device, torch.device(0))
    self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
    out = pipe("This is a test")
    self.assertEqual(
        out,
        [
            {
                "generated_text": (
                    "This is a test test test test test test test test test test test test test test test test"
                    " test"
                )
            }
        ],
    )

    # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
    pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto")
    self.assertEqual(pipe.model.device, torch.device(0))
    self.assertEqual(pipe.model.lm_head.weight.dtype, torch.float32)
    out = pipe("This is a test")
    self.assertEqual(
        out,
        [
            {
                "generated_text": (
                    "This is a test test test test test test test test test test test test test test test test"
                    " test"
                )
            }
        ],
    )
@require_torch
@require_torch_gpu
def test_small_model_fp16(self):
    import torch

    pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device=0, torch_dtype=torch.float16)
    pipe("This is a test")

@require_torch
@require_accelerate
@require_torch_gpu
def test_pipeline_accelerate_top_p(self):
    import torch

    pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.float16)
    pipe("This is a test", do_sample=True, top_p=0.5)
def test_pipeline_length_setting_warning(self):
    prompt = "Hello world"
    text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
    if text_generator.model.framework == "tf":
        logger = logging.get_logger("transformers.generation.tf_utils")
    else:
        logger = logging.get_logger("transformers.generation.utils")
    logger_msg = "Both `max_new_tokens`"  # The beginning of the message to be checked in this test

    # Both are set by the user -> log warning
    with CaptureLogger(logger) as cl:
        _ = text_generator(prompt, max_length=10, max_new_tokens=1)
    self.assertIn(logger_msg, cl.out)

    # The user only sets one -> no warning
    with CaptureLogger(logger) as cl:
        _ = text_generator(prompt, max_new_tokens=1)
    self.assertNotIn(logger_msg, cl.out)

    with CaptureLogger(logger) as cl:
        _ = text_generator(prompt, max_length=10)
    self.assertNotIn(logger_msg, cl.out)
| 517 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    r"""
    Constructs a LayoutLMv3 processor which combines a LayoutLMv3 image processor and a LayoutLMv3 tokenizer into a
    single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, boxes: Union[List[List[int]], List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )
        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"], text_pair=text_pair,
            boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels,
            add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
            max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping, return_length=return_length,
            verbose=verbose, return_tensors=return_tensors, **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 517 | 1 |
'''simple docstring'''
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
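# The three branches solve the impedance triangle Z**2 = R**2 + X**2 for whichever
# quantity was passed as 0. Example (3-4-5 triangle):
#   electrical_impedance(3, 4, 0) -> {'impedance': 5.0}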
if __name__ == "__main__":
import doctest
doctest.testmod()
| 596 |
'''simple docstring'''
def is_pentagonal(n: int) -> bool:
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0
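# P(n) = n * (3n - 1) / 2 inverts to n = (1 + sqrt(1 + 24 * x)) / 6, so x is pentagonal
# exactly when that n is a positive integer. solution() below scans pairs of pentagonal
# numbers and returns the first difference P_j - P_i for which both the sum and the
# difference are pentagonal (Project Euler problem 44).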
def solution(limit: int = 5000) -> int:
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
if __name__ == "__main__":
print(f"{solution() = }")
| 44 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
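# make_batched (below) normalizes the accepted inputs to a list of videos, where each
# video is a list of frames: a single image becomes [[image]] and one video becomes [video].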
def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
class VideoMAEImageProcessor(BaseImageProcessor):
    r"""Constructs a VideoMAE image processor."""

    model_input_names = ["pixel_values"]
    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PILImageResampling.BILINEAR , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray:
A : List[Any] = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
if "shortest_edge" in size:
A : Optional[Any] = get_resize_output_image_size(__UpperCAmelCase , size['''shortest_edge'''] , default_to_square=__UpperCAmelCase )
elif "height" in size and "width" in size:
A : List[Any] = (size['''height'''], size['''width'''])
else:
raise ValueError(f'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray:
A : List[str] = get_size_dict(__UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'Size must have \'height\' and \'width\' as keys. Got {size.keys()}' )
return center_crop(__UpperCAmelCase , size=(size['''height'''], size['''width''']) , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> str:
return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray:
return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , ) -> np.ndarray:
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
A : List[str] = to_numpy_array(__UpperCAmelCase )
if do_resize:
A : Optional[int] = self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase )
if do_center_crop:
A : Dict = self.center_crop(__UpperCAmelCase , size=__UpperCAmelCase )
if do_rescale:
A : List[Any] = self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase )
if do_normalize:
A : Union[str, Any] = self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase )
A : List[Any] = to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase )
return image
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ) -> PIL.Image.Image:
A : int = do_resize if do_resize is not None else self.do_resize
A : int = resample if resample is not None else self.resample
A : int = do_center_crop if do_center_crop is not None else self.do_center_crop
A : str = do_rescale if do_rescale is not None else self.do_rescale
A : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
A : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
A : Dict = image_mean if image_mean is not None else self.image_mean
A : Optional[int] = image_std if image_std is not None else self.image_std
A : Dict = size if size is not None else self.size
A : Tuple = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
A : List[str] = crop_size if crop_size is not None else self.crop_size
A : str = get_size_dict(__UpperCAmelCase , param_name='''crop_size''' )
if not valid_images(__UpperCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
A : Union[str, Any] = make_batched(__UpperCAmelCase )
A : Any = [
[
self._preprocess_image(
image=__UpperCAmelCase , do_resize=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , do_center_crop=__UpperCAmelCase , crop_size=__UpperCAmelCase , do_rescale=__UpperCAmelCase , rescale_factor=__UpperCAmelCase , do_normalize=__UpperCAmelCase , image_mean=__UpperCAmelCase , image_std=__UpperCAmelCase , data_format=__UpperCAmelCase , )
for img in video
]
for video in videos
]
A : Tuple = {'''pixel_values''': videos}
return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
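# Usage sketch (shapes are assumptions; frames may be PIL images or numpy arrays):
#   image_processor = VideoMAEImageProcessor()
#   batch = image_processor([video_frames], return_tensors="np")
#   batch["pixel_values"]  # -> (num_videos, num_frames, num_channels, height, width)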
| 718 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(self, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[32, 64, 160, 256], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], num_attention_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, classifier_dropout_prob=0.1, initializer_range=0.02, drop_path_rate=0.1, layer_norm_eps=1e-6, decoder_hidden_size=256, semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
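    # `atol_for_validation` above is the absolute tolerance used when the ONNX export is
    # validated against the PyTorch model's outputs, and `default_onnx_opset` is the
    # opset version the exporter targets by default.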
| 423 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self : Optional[int] ):
UpperCAmelCase = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
UpperCAmelCase = dict(zip(a__ , range(len(a__ ) ) ) )
UpperCAmelCase = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
UpperCAmelCase = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 16000,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCAmelCase = os.path.join(self.tmpdirname , a__ )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(a__ ) + '''\n''' )
with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(a__ ) + '''\n''' )
# load decoder from hub
UpperCAmelCase = '''hf-internal-testing/ngram-beam-search-decoder'''
def __snake_case ( self : int , **a__ : Optional[int] ):
UpperCAmelCase = self.add_kwargs_tokens_map.copy()
kwargs.update(a__ )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **a__ )
def __snake_case ( self : int , **a__ : str ):
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **a__ )
def __snake_case ( self : List[str] , **a__ : Optional[Any] ):
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **a__ )
def __snake_case ( self : int ):
shutil.rmtree(self.tmpdirname )
def __snake_case ( self : Optional[int] ):
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = self.get_feature_extractor()
UpperCAmelCase = self.get_decoder()
UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=a__ , feature_extractor=a__ , decoder=a__ )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , a__ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , a__ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , a__ )
def __snake_case ( self : Any ):
UpperCAmelCase = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def __snake_case ( self : Optional[int] ):
UpperCAmelCase = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(a__ , '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=a__ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def __snake_case ( self : Optional[int] ):
UpperCAmelCase = self.get_feature_extractor()
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = self.get_decoder()
UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=a__ , feature_extractor=a__ , decoder=a__ )
UpperCAmelCase = floats_list((3, 1000) )
UpperCAmelCase = feature_extractor(a__ , return_tensors='''np''' )
UpperCAmelCase = processor(a__ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __snake_case ( self : Any ):
UpperCAmelCase = self.get_feature_extractor()
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = self.get_decoder()
UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=a__ , feature_extractor=a__ , decoder=a__ )
UpperCAmelCase = '''This is a test string'''
UpperCAmelCase = processor(text=a__ )
UpperCAmelCase = tokenizer(a__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __snake_case ( self : int , a__ : int=(2, 10, 16) , a__ : List[Any]=77 ):
np.random.seed(a__ )
return np.random.rand(*a__ )
def __snake_case ( self : Optional[Any] ):
UpperCAmelCase = self.get_feature_extractor()
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = self.get_decoder()
UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=a__ , feature_extractor=a__ , decoder=a__ )
UpperCAmelCase = self._get_dummy_logits(shape=(10, 16) , seed=13 )
UpperCAmelCase = processor.decode(a__ )
UpperCAmelCase = decoder.decode_beams(a__ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def __snake_case ( self : Any , a__ : str ):
UpperCAmelCase = self.get_feature_extractor()
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = self.get_decoder()
UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=a__ , feature_extractor=a__ , decoder=a__ )
UpperCAmelCase = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
UpperCAmelCase = processor.batch_decode(a__ )
else:
with get_context(a__ ).Pool() as pool:
UpperCAmelCase = processor.batch_decode(a__ , a__ )
UpperCAmelCase = list(a__ )
with get_context('''fork''' ).Pool() as p:
UpperCAmelCase = decoder.decode_beams_batch(a__ , a__ )
UpperCAmelCase, UpperCAmelCase, UpperCAmelCase = [], [], []
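# each pyctcdecode beam is a tuple whose first entry is the decoded text and whose last
# two entries are the logit score and the LM-fused score; beams[0] is the best hypothesis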
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(a__ , decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text )
self.assertListEqual(a__ , decoded_processor.logit_score )
self.assertListEqual(a__ , decoded_processor.lm_score )
def __snake_case ( self : Dict ):
UpperCAmelCase = self.get_feature_extractor()
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = self.get_decoder()
UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=a__ , feature_extractor=a__ , decoder=a__ )
UpperCAmelCase = self._get_dummy_logits()
UpperCAmelCase = 15
UpperCAmelCase = -20.0
UpperCAmelCase = -4.0
UpperCAmelCase = processor.batch_decode(
a__ , beam_width=a__ , beam_prune_logp=a__ , token_min_logp=a__ , )
UpperCAmelCase = decoded_processor_out.text
UpperCAmelCase = list(a__ )
with get_context('''fork''' ).Pool() as pool:
UpperCAmelCase = decoder.decode_beams_batch(
a__ , a__ , beam_width=a__ , beam_prune_logp=a__ , token_min_logp=a__ , )
UpperCAmelCase = [d[0][0] for d in decoded_decoder_out]
UpperCAmelCase = [d[0][2] for d in decoded_decoder_out]
UpperCAmelCase = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(a__ , a__ )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , a__ )
self.assertTrue(np.array_equal(a__ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , a__ , atol=1e-3 ) )
self.assertTrue(np.array_equal(a__ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9_474] , a__ , atol=1e-3 ) )
def __snake_case ( self : Union[str, Any] ):
UpperCAmelCase = self.get_feature_extractor()
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = self.get_decoder()
UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=a__ , feature_extractor=a__ , decoder=a__ )
UpperCAmelCase = self._get_dummy_logits()
UpperCAmelCase = 2.0
UpperCAmelCase = 5.0
UpperCAmelCase = -20.0
UpperCAmelCase = True
UpperCAmelCase = processor.batch_decode(
a__ , alpha=a__ , beta=a__ , unk_score_offset=a__ , lm_score_boundary=a__ , )
UpperCAmelCase = decoded_processor_out.text
UpperCAmelCase = list(a__ )
decoder.reset_params(
alpha=a__ , beta=a__ , unk_score_offset=a__ , lm_score_boundary=a__ , )
with get_context('''fork''' ).Pool() as pool:
UpperCAmelCase = decoder.decode_beams_batch(
a__ , a__ , )
UpperCAmelCase = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(a__ , a__ )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , a__ )
UpperCAmelCase = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , a__ )
def __snake_case ( self : List[Any] ):
UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase = processor.decoder.model_container[processor.decoder._model_key]
UpperCAmelCase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
UpperCAmelCase = os.listdir(a__ )
UpperCAmelCase = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder-relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(a__ , a__ )
def __snake_case ( self : str ):
UpperCAmelCase = snapshot_download('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained(a__ )
UpperCAmelCase = processor.decoder.model_container[processor.decoder._model_key]
UpperCAmelCase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
UpperCAmelCase = os.listdir(a__ )
UpperCAmelCase = os.listdir(a__ )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that the decoder downloaded from the hub and the decoder loaded from local cached files are the same
self.assertListEqual(a__ , a__ )
def __snake_case ( self : List[Any] ):
UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase = floats_list((3, 1000) )
UpperCAmelCase = processor_wavaveca(a__ , return_tensors='''np''' )
UpperCAmelCase = processor_auto(a__ , return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
UpperCAmelCase = self._get_dummy_logits()
UpperCAmelCase = processor_wavaveca.batch_decode(a__ )
UpperCAmelCase = processor_auto.batch_decode(a__ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def __snake_case ( self : Union[str, Any] ):
UpperCAmelCase = self.get_feature_extractor()
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = self.get_decoder()
UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=a__ , feature_extractor=a__ , decoder=a__ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
@staticmethod
def __snake_case ( a__ : Tuple , a__ : int ):
UpperCAmelCase = [d[key] for d in offsets]
return retrieved_list
def __snake_case ( self : Dict ):
UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase = self._get_dummy_logits()[0]
UpperCAmelCase = processor.decode(a__ , output_word_offsets=a__ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(a__ , a__ ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )
def __snake_case ( self : Any ):
UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase = self._get_dummy_logits()
UpperCAmelCase = processor.batch_decode(a__ , output_word_offsets=a__ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(a__ , a__ ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(a__ , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def __snake_case ( self : Dict ):
import torch
UpperCAmelCase = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=a__ )
UpperCAmelCase = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=16000 ) )
UpperCAmelCase = iter(a__ )
UpperCAmelCase = next(a__ )
UpperCAmelCase = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
UpperCAmelCase = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
UpperCAmelCase = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values
with torch.no_grad():
UpperCAmelCase = model(a__ ).logits.cpu().numpy()
UpperCAmelCase = processor.decode(logits[0] , output_word_offsets=a__ )
UpperCAmelCase = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
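# `inputs_to_logits_ratio` is the number of raw audio samples per logits frame, so
# dividing it by the sampling rate converts a CTC frame offset into seconds.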
UpperCAmelCase = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
UpperCAmelCase = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(a__ , '''word''' ) ) , a__ )
self.assertEqual(''' '''.join(self.get_from_offsets(a__ , '''word''' ) ) , output.text )
# output times
UpperCAmelCase = torch.tensor(self.get_from_offsets(a__ , '''start_time''' ) )
UpperCAmelCase = torch.tensor(self.get_from_offsets(a__ , '''end_time''' ) )
# fmt: off
UpperCAmelCase = torch.tensor([1.4_199, 1.6_599, 2.2_599, 3.0, 3.24, 3.5_999, 3.7_999, 4.0_999, 4.26, 4.94, 5.28, 5.6_599, 5.78, 5.94, 6.32, 6.5_399, 6.6_599] )
UpperCAmelCase = torch.tensor([1.5_399, 1.8_999, 2.9, 3.16, 3.5_399, 3.72, 4.0_199, 4.1_799, 4.76, 5.1_599, 5.5_599, 5.6_999, 5.86, 6.1_999, 6.38, 6.6_199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(a__ , a__ , atol=0.01 ) )
self.assertTrue(torch.allclose(a__ , a__ , atol=0.01 ) )
| 51 |
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class TFCvtModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, num_channels=3, embed_dim=[16, 48, 96], num_heads=[1, 3, 6], depth=[1, 2, 10], patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2], patch_padding=[2, 1, 1], stride_kv=[2, 2, 2], cls_token=[False, False, True], attention_drop_rate=[0.0, 0.0, 0.0], initializer_range=0.02, layer_norm_eps=1e-12, is_training=True, use_labels=True, num_labels=2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size, num_labels=self.num_labels, num_channels=self.num_channels, embed_dim=self.embed_dim, num_heads=self.num_heads, patch_sizes=self.patch_sizes, patch_padding=self.patch_padding, patch_stride=self.patch_stride, stride_kv=self.stride_kv, depth=self.depth, cls_token=self.cls_token, attention_drop_rate=self.attention_drop_rate, initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))
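        # (the loop above applies the standard convolution output-size formula,
        # floor((dim + 2 * padding - kernel_size) / stride) + 1, once per stage)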
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFCvtModelTester(self)
        self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    def test_dataset_conversion(self):
        super().test_dataset_conversion()

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8")
    def test_keras_fit_mixed_precision(self):
        policy = tf.keras.mixed_precision.Policy("mixed_float16")
        tf.keras.mixed_precision.set_global_policy(policy)
        super().test_keras_fit()
        tf.keras.mixed_precision.set_global_policy("float32")
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFCvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
    @slow
    def test_inference_image_classification_head(self):
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([0.9285, 0.9015, -0.3150])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 166 | 0 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def load_and_quantize_model(model: torch.nn.Module, bnb_quantization_config: BnbQuantizationConfig, weights_location: Union[str, os.PathLike] = None, device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None, no_split_module_classes: Optional[List[str]] = None, max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None, offload_folder: Optional[Union[str, os.PathLike]] = None, offload_state_dict: bool = False):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed."
        )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization."
            "We move the model to cuda."
        )
        return model
    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
        )
    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True

        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])

        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
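# Minimal usage sketch (argument values are illustrative, `MyModel` is hypothetical):
#   bnb_config = BnbQuantizationConfig(load_in_8bit=True)
#   with init_empty_weights():
#       empty_model = MyModel(config)
#   quantized = load_and_quantize_model(
#       empty_model, bnb_config, weights_location="/path/to/weights", device_map="auto"
#   )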
def get_quantized_model_device_map(model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`.")

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """
                    )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    if modules_to_not_convert is None:
        modules_to_not_convert = []

    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
def _replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def _A (model ):
    '''simple docstring'''
    with init_empty_weights():
        tied_model = deepcopy(model )  # this has 0 cost since it is done inside the `init_empty_weights` context manager
    tied_params = find_tied_parameters(tied_model )
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params , dict ):
        tied_keys = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
    else:
        tied_keys = sum(tied_params , [] )
    has_tied_params = len(tied_keys ) > 0
    # Check if it is a base model
    is_base_model = False
    if hasattr(model , 'base_model_prefix' ):
        is_base_model = not hasattr(model , model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children() )
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module ) - set(tied_keys )
    list_untouched = list(set(tied_keys ) ) + list(intersection )
    # remove ".weight" from the keys
    names_to_remove = ['.weight', '.bias']
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove , '' )
        filtered_module_names.append(name )
return filtered_module_names
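# Sketch of the intent (hypothetical model, not from the source): for a causal-LM
# whose output head shares weights with the input embeddings, the helper above
# keeps that head out of quantization and would typically return something like
# ["lm_head"].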
def _A (model ):
    '''simple docstring'''
    for m in model.modules():
        if isinstance(m , bnb.nn.Linearabit ):
return True
return False
def _A (parameter :nn.Module ):
    '''simple docstring'''
    return next(parameter.parameters() ).device
def _A (model, param, param_name, new_dtype, offload_folder, offload_index, fpaa_statistics ):
    '''simple docstring'''
    if fpaa_statistics is None:
        set_module_tensor_to_device(model , param_name , 0 , dtype=new_dtype , value=param )
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split('.' )
            for split in splits[:-1]:
                new_module = getattr(module , split )
                if new_module is None:
                    raise ValueError(f'{module} has no attribute {split}.' )
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name] , param_name , offload_folder , index=offload_index )
        if hasattr(module._parameters[tensor_name] , 'SCB' ):
            offload_weight(
                module._parameters[tensor_name].SCB , param_name.replace('weight' , 'SCB' ) , offload_folder , index=offload_index , )
    else:
        offload_weight(param , param_name , offload_folder , index=offload_index )
        offload_weight(fpaa_statistics , param_name.replace('weight' , 'SCB' ) , offload_folder , index=offload_index )
    set_module_tensor_to_device(model , param_name , 'meta' , dtype=new_dtype , value=torch.empty(*param.size() ) )
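# Note: "SCB" is the per-tensor int8 quantization statistic that bitsandbytes
# attaches to Linear8bitLt-style weights; offloading it next to the weight lets the
# quantized tensor be reconstructed when the module is paged back in.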
| 714 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
"configuration_data2vec_text": [
"DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecTextConfig",
"Data2VecTextOnnxConfig",
],
"configuration_data2vec_vision": [
"DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecVisionConfig",
"Data2VecVisionOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
"DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecAudioForAudioFrameClassification",
"Data2VecAudioForCTC",
"Data2VecAudioForSequenceClassification",
"Data2VecAudioForXVector",
"Data2VecAudioModel",
"Data2VecAudioPreTrainedModel",
]
    _import_structure["modeling_data2vec_text"] = [
"DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecTextForCausalLM",
"Data2VecTextForMaskedLM",
"Data2VecTextForMultipleChoice",
"Data2VecTextForQuestionAnswering",
"Data2VecTextForSequenceClassification",
"Data2VecTextForTokenClassification",
"Data2VecTextModel",
"Data2VecTextPreTrainedModel",
]
    _import_structure["modeling_data2vec_vision"] = [
"DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecVisionForImageClassification",
"Data2VecVisionForMaskedImageModeling",
"Data2VecVisionForSemanticSegmentation",
"Data2VecVisionModel",
"Data2VecVisionPreTrainedModel",
]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
"TFData2VecVisionForImageClassification",
"TFData2VecVisionForSemanticSegmentation",
"TFData2VecVisionModel",
"TFData2VecVisionPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 532 | 0 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict ) -> List[Any]:
    '''simple docstring'''
    return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items() )
def upgrade_state_dict(state_dict, codebook_state_dict ) -> int:
    '''simple docstring'''
    upgrade = {}
    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue
        key = key.replace('''heads.cmd.mim_head.cls.predictions''', '''mmm_image_head''' )
        key = key.replace('''heads.cmd.mlm_head.cls.predictions''', '''mmm_text_head''' )
        key = key.replace('''heads.cmd.itm_head.cls''', '''itm_head''' )
        key = key.replace('''heads.cmd.itm_head.pooler''', '''itm_head.pooler''' )
        key = key.replace('''heads.cmd.clip_head.logit_scale''', '''flava.logit_scale''' )
        key = key.replace('''heads.fairseq_mlm.cls.predictions''', '''mlm_head''' )
        key = key.replace('''heads.imagenet.mim_head.cls.predictions''', '''mim_head''' )
        key = key.replace('''mm_text_projection''', '''flava.text_to_mm_projection''' )
        key = key.replace('''mm_image_projection''', '''flava.image_to_mm_projection''' )
        key = key.replace('''image_encoder.module''', '''flava.image_model''' )
        key = key.replace('''text_encoder.module''', '''flava.text_model''' )
        key = key.replace('''mm_encoder.module.encoder.cls_token''', '''flava.multimodal_model.cls_token''' )
        key = key.replace('''mm_encoder.module''', '''flava.multimodal_model''' )
        key = key.replace('''text_projection''', '''flava.text_projection''' )
        key = key.replace('''image_projection''', '''flava.image_projection''' )
        upgrade[key] = value.float()
    for key, value in codebook_state_dict.items():
        upgrade[f'''image_codebook.{key}'''] = value
    return upgrade
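# Minimal illustration (hypothetical checkpoint key): the renaming above maps the
# original FLAVA layout onto the HF one, e.g.
#     "text_encoder.module.encoder.layer.0.attention.weight"
#  -> "flava.text_model.encoder.layer.0.attention.weight"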
@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None ) -> Optional[int]:
    '''simple docstring'''
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path )
    else:
        config = FlavaConfig()
    hf_model = FlavaForPreTraining(config ).eval()
    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False )
    if os.path.exists(checkpoint_path ):
        state_dict = torch.load(checkpoint_path, map_location='''cpu''' )
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location='''cpu''' )
    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict )
    hf_model.load_state_dict(hf_state_dict )
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict )
    state_dict_count = count_parameters(state_dict ) + count_parameters(codebook_state_dict )
    assert torch.allclose(hf_count, state_dict_count, atol=1E-3 )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
parser.add_argument("""--codebook_path""", default=None, type=str, help="""Path to flava codebook checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    args = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 535 |
"""simple docstring"""
class Graph:
    def __init__(self ) -> None:
        """simple docstring"""
        self.vertex = {}
    # for printing the graph vertices
    def print_graph(self ) -> None:
        """simple docstring"""
        print(self.vertex )
        for i in self.vertex:
            print(i ,''' -> ''' ,''' -> '''.join([str(j ) for j in self.vertex[i]] ) )
    # for adding the edge between two vertices
    def add_edge(self ,from_vertex : int ,to_vertex : int ) -> None:
        """simple docstring"""
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex )
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]
    def dfs(self ) -> None:
        """simple docstring"""
        visited = [False] * len(self.vertex )
        # call the recursive helper function
        for i in range(len(self.vertex ) ):
            if not visited[i]:
                self.dfs_recursive(i ,visited )
    def dfs_recursive(self ,start_vertex : int ,visited : list ) -> None:
        """simple docstring"""
        visited[start_vertex] = True
        print(start_vertex ,end=''' ''' )
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i ,visited )
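# Note: dfs() starts a traversal from every still-unvisited vertex, so vertices in
# disconnected components are printed too; a single dfs_recursive call would only
# reach nodes connected to its start vertex.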
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("""DFS:""")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 535 | 1 |
from __future__ import annotations
def all_construct( target : str, word_bank : list[str] | None = None ):
    word_bank = word_bank or []
    # create a table
    table_size = len(target ) + 1
    table = []
    for _ in range(table_size ):
        table.append([] )
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size ):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word )] == word:
                    new_combinations = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to table[i + len(word)]
                    table[i + len(word )] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target )]:
        combination.reverse()
    return table[len(target )]
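# Worked example (hand-checked): with target "abcdef" and word bank
# ["ab", "abc", "cd", "def", "abcd"], only "abc" + "def" tiles the whole string
# ("ab" + "cd" leaves "ef" uncovered), so
# all_construct("abcdef", ["ab", "abc", "cd", "def", "abcd"]) == [["abc", "def"]].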
if __name__ == "__main__":
print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
print(
all_construct(
"hexagonosaurus",
["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
)
)
| 700 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
EN_CODE = 12_8022
FR_CODE = 12_8028
@require_sentencepiece
class A ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MaMaaaTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self ) -> Any:
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        save_dir = Path(self.tmpdirname )
        save_json(vocab_tokens , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP , save_dir / VOCAB_FILES_NAMES["spm_file"] )
        tokenizer = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_tokenizer(self , **snake_case_ ) -> str:
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
    def get_input_output_texts(self , snake_case_ ) -> Tuple:
return (
"This is a test",
"This is a test",
)
    def test_convert_token_and_id(self ) -> Optional[Any]:
        token = "</s>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab(self ) -> List[Any]:
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "</s>" )
        self.assertEqual(vocab_keys[1] , "<unk>" )
        self.assertEqual(vocab_keys[-1] , "<s>" )
        self.assertEqual(len(vocab_keys ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("Skip this test while all models are still to be uploaded." )
def __lowerCAmelCase ( self ) -> Any:
pass
    def test_full_tokenizer(self ) -> Dict:
        tokenizer = self.get_tokenizer()
        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [2, 3, 4, 5, 6] , )
        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
        self.assertListEqual(back_tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
        text = tokenizer.convert_tokens_to_string(tokens )
        self.assertEqual(text , "This is a test" )
@slow
    def test_tokenizer_integration(self ) -> List[Any]:
# fmt: off
_a = {"input_ids": [[1_2_8_0_2_2, 1_1_0_1_0_8, 3_9_7, 1_1, 3_8_2_7_2, 2_2_4_7, 1_2_4_8_1_1, 2_8_5, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 3_9_5_3_4, 4_4_2_8, 3_9_7, 1_0_1_9, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 4_1_3_3_7, 1_6_7_8_6, 2_4_1, 7, 2_0_2_1_4, 1_7, 1_2_5_6_9_0, 1_0_3_9_8, 7, 4_4_3_7_8, 5_8_0_6_9, 6_8_3_4_2, 7_7_9_8, 7_3_4_3, 1_1, 2_9_9, 3_3_3_1_0, 4, 1_5_8, 3_7_3_5_0, 9_4_0_7_7, 4_5_6_9, 2_9_9, 3_3_3_1_0, 9_0, 4, 5_2_8_4_0, 2_9_0, 4, 3_1_2_7_0, 1_1_2, 2_9_9, 6_8_2, 4, 5_2_8_4_0, 3_9_9_5_3, 1_4_0_7_9, 1_9_3, 5_2_5_1_9, 9_0_8_9_4, 1_7_8_9_4, 1_2_0_6_9_7, 1_1, 4_0_4_4_5, 5_5_1, 1_7, 1_0_1_9, 5_2_5_1_9, 9_0_8_9_4, 1_7_7_5_6, 9_6_3, 1_1, 4_0_4_4_5, 4_8_0, 1_7, 9_7_9_2, 1_1_2_0, 5_1_7_3, 1_3_9_3, 6_2_4_0, 1_6_7_8_6, 2_4_1, 1_2_0_9_9_6, 2_8, 1_2_4_5, 1_3_9_3, 1_1_8_2_4_0, 1_1_1_2_3, 1_0_1_9, 9_3_6_1_2, 2_6_9_1, 1_0_6_1_8, 9_8_0_5_8, 1_2_0_4_0_9, 1_9_2_8, 2_7_9, 4, 4_0_6_8_3, 3_6_7, 1_7_8, 2_0_7, 1_0_1_9, 1_0_3, 1_0_3_1_2_1, 5_0_6, 6_5_2_9_6, 5, 2], [1_2_8_0_2_2, 2_1_2_1_7, 3_6_7, 1_1_7, 1_2_5_4_5_0, 1_2_8, 7_1_9, 7, 7_3_0_8, 4_0, 9_3_6_1_2, 1_2_6_6_9, 1_1_1_6, 1_6_7_0_4, 7_1, 1_7_7_8_5, 3_6_9_9, 1_5_5_9_2, 3_5, 1_4_4, 9_5_8_4, 2_4_1, 1_1_9_4_3, 7_1_3, 9_5_0, 7_9_9, 2_2_4_7, 8_8_4_2_7, 1_5_0, 1_4_9, 1_1_8_8_1_3, 1_2_0_7_0_6, 1_0_1_9, 1_0_6_9_0_6, 8_1_5_1_8, 2_8, 1_2_2_4, 2_2_7_9_9, 3_9_7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1_2_8_0_2_2, 1_6_5_8, 1_2_3_3_1_1, 5_1_5_5, 5_5_7_8, 4_7_2_2, 2_7_9, 1_4_9_4_7, 2_3_6_6, 1_1_2_0, 1_1_9_7, 1_4, 1_3_4_8, 9_2_3_2, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class A ( unittest.TestCase ):
    checkpoint_name = """facebook/m2m100_418M"""
    src_text = [
        """In my opinion, there are two levels of response from the French government.""",
        """NSA Affair Emphasizes Complete Lack of Debate on Intelligence""",
    ]
    tgt_text = [
        """Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
        """L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
@classmethod
    def setUpClass(cls ) -> int:
        cls.tokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang="en" , tgt_lang="fr" )
        cls.pad_token_id = 1
return cls
    def test_language_codes(self ) -> Any:
self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 1_2_8_0_0_6 )
self.assertEqual(self.tokenizer.get_lang_id("en" ) , 1_2_8_0_2_2 )
self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 1_2_8_0_7_6 )
self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 1_2_8_0_6_3 )
    def test_get_vocab(self ) -> Union[str, Any]:
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab ) , self.tokenizer.vocab_size )
        self.assertEqual(vocab["<unk>"] , 3 )
        self.assertIn(self.tokenizer.get_lang_token("en" ) , vocab )
    def test_tokenizer_batch_encode_plus(self ) -> List[str]:
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , ids )
    def test_tokenizer_decode_ignores_language_codes(self ) -> Optional[int]:
        self.assertIn(FR_CODE , self.tokenizer.all_special_ids )
        # fmt: off
        generated_ids = [FR_CODE, 5_3_6_4, 8_2, 8_6_4_2, 4, 2_9_4, 4_7, 8, 1_4_0_2_8, 1_3_6, 3_2_8_6, 9_7_0_6, 6, 9_0_7_9_7, 6, 1_4_4_0_1_2, 1_6_2, 8_8_1_2_8, 3_0_0_6_1, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
        expected_french = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
        self.assertEqual(result , expected_french )
        self.assertNotIn(self.tokenizer.eos_token , result )
    def test_special_tokens_unaffacted_by_save_load(self ) -> Union[str, Any]:
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname )
        new_tok = MaMaaaTokenizer.from_pretrained(tmpdirname )
        self.assertDictEqual(new_tok.lang_token_to_id , original_special_tokens )
@require_torch
    def test_batch_fairseq_parity(self ) -> Optional[Any]:
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"
        batch = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=True , return_tensors="pt" )
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
        for k in batch:
            batch[k] = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
    def test_src_lang_setter(self ) -> Union[str, Any]:
        self.tokenizer.src_lang = "mr"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
_a = "zh"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
    def test_tokenizer_target_mode(self ) -> List[Any]:
        self.tokenizer.tgt_lang = "mr"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
_a = "zh"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
    def test_tokenizer_translation(self ) -> int:
        inputs = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" )
        self.assertEqual(
            nested_simplify(inputs ) , {
# en_XX, A, test, EOS
"input_ids": [[1_2_8_0_2_2, 5_8, 4_1_8_3, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 1_2_8_0_0_6,
} , )
| 691 | 0 |
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / """src"""
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {"""base""": """patrickvonplaten/wav2vec2_tiny_random""", """robust""": """patrickvonplaten/wav2vec2_tiny_random_robust"""}
ZERO2 = """zero2"""
ZERO3 = """zero3"""
stages = [ZERO2, ZERO3]
def parameterized_custom_name_func(func , param_num , param ) -> Optional[int]:
    """simple docstring"""
    param_based_name = parameterized.to_safe_name('''_'''.join(str(x ) for x in param.args ) )
    return f"""{func.__name__}_{param_based_name}"""
# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
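# Illustrative example (assumes the usual behaviour of `parameterized.to_safe_name`):
# for a test method named `test_fp32_non_distributed` parameterized with the args
# ("zero2", "base"), the name function above produces
# "test_fp32_non_distributed_zero2_base", so each (stage, model) pair gets its own
# sub-test name.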
@slow
@require_deepspeed
@require_torch_gpu
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
@parameterized.expand(UpperCAmelCase__ , name_func=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str) ->Optional[Any]:
'''simple docstring'''
self.run_and_check(
stage=UpperCAmelCase__ , model=UpperCAmelCase__ , distributed=UpperCAmelCase__ , fpaa=UpperCAmelCase__ , )
@require_torch_multi_gpu
@parameterized.expand(UpperCAmelCase__ , name_func=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[Any]) ->Optional[Any]:
'''simple docstring'''
self.run_and_check(
stage=UpperCAmelCase__ , model=UpperCAmelCase__ , distributed=UpperCAmelCase__ , fpaa=UpperCAmelCase__ , )
@parameterized.expand(UpperCAmelCase__ , name_func=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Tuple) ->Any:
'''simple docstring'''
self.run_and_check(
stage=UpperCAmelCase__ , model=UpperCAmelCase__ , distributed=UpperCAmelCase__ , fpaa=UpperCAmelCase__ , )
@require_torch_multi_gpu
@parameterized.expand(UpperCAmelCase__ , name_func=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any) ->Union[str, Any]:
'''simple docstring'''
self.run_and_check(
stage=UpperCAmelCase__ , model=UpperCAmelCase__ , distributed=UpperCAmelCase__ , fpaa=UpperCAmelCase__ , )
    def do_checks(self , output_dir ) ->Optional[int]:
'''simple docstring'''
pass
    def run_and_check(self , stage : str , model : str , eval_steps : int = 10 , distributed : bool = True , fpaa : bool = True , ) ->List[Any]:
        '''simple docstring'''
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage , model_name=model_name , eval_steps=eval_steps , num_train_epochs=1 , distributed=distributed , fpaa=fpaa , )
        self.do_checks(output_dir)
return output_dir
    def run_trainer(self , stage : str , model_name : str , eval_steps : int = 10 , num_train_epochs : int = 1 , distributed : bool = True , fpaa : bool = True , ) ->Any:
        '''simple docstring'''
        output_dir = self.get_auto_remove_tmp_dir('''./xxx''' , after=None)
        args = f"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
--report_to none
""".split()
if fpaa:
args.extend(['''--fp16'''])
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
        ds_args = f"""--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json""".split()
        script = [f"""{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"""]
        launcher = self.get_launcher(distributed)
        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd , env=self.get_env())
return output_dir
    def get_launcher(self , distributed=False) ->Optional[Any]:
'''simple docstring'''
        num_gpus = min(2 , get_gpu_count()) if distributed else 1
return f"""deepspeed --num_nodes 1 --num_gpus {num_gpus}""".split()
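# Example (hand-checked against the f-string above): on a machine with two or more
# GPUs, get_launcher(distributed=True) returns
# ["deepspeed", "--num_nodes", "1", "--num_gpus", "2"], which run_trainer prepends
# to the training script and its arguments.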
| 87 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"""configuration_xglm""": ["""XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XGLMConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""XGLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""XGLMTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_xglm"""] = [
"""XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XGLMForCausalLM""",
"""XGLMModel""",
"""XGLMPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_xglm"""] = [
"""FlaxXGLMForCausalLM""",
"""FlaxXGLMModel""",
"""FlaxXGLMPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_xglm"""] = [
"""TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXGLMForCausalLM""",
"""TFXGLMModel""",
"""TFXGLMPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 384 | 0 |
"""simple docstring"""
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_tfa_weights_in_bert(model , tf_checkpoint_path , config ):
    '''simple docstring'''
    tf_path = os.path.abspath(tf_checkpoint_path )
    logger.info(F'''Converting TensorFlow checkpoint from {tf_path}''' )
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path )
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        name = full_name.split('''/''' )
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(F'''Skipping non-model layer {full_name}''' )
            continue
        if "optimizer" in full_name:
            logger.info(F'''Skipping optimization layer {full_name}''' )
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith('''layer_with_weights''' ):
                depth += 1
            else:
                break
        layer_depth.append(depth )
        # read data
        array = tf.train.load_variable(tf_path , full_name )
        names.append('''/'''.join(name ) )
        arrays.append(array )
    logger.info(F'''Read a total of {len(arrays ):,} layers''' )
    # Sanity check
    if len(set(layer_depth ) ) != 1:
        raise ValueError(F'''Found layer names with different depths (layer depth {list(set(layer_depth ) )})''' )
    layer_depth = list(set(layer_depth ) )[0]
    if layer_depth != 1:
        raise ValueError(
            '''The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP'''
            ''' heads.''' )
# convert layers
logger.info('''Converting weights...''' )
    for full_name, array in zip(names , arrays ):
        name = full_name.split('''/''' )
        pointer = model
        trace = []
        for i, m_name in enumerate(name ):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith('''layer_with_weights''' ):
                layer_num = int(m_name.split('''-''' )[-1] )
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(['''embeddings''', '''LayerNorm'''] )
                    pointer = getattr(pointer , '''embeddings''' )
                    pointer = getattr(pointer , '''LayerNorm''' )
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(['''encoder''', '''layer''', str(layer_num - 4 )] )
                    pointer = getattr(pointer , '''encoder''' )
                    pointer = getattr(pointer , '''layer''' )
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(['''pooler''', '''dense'''] )
                    pointer = getattr(pointer , '''pooler''' )
                    pointer = getattr(pointer , '''dense''' )
            elif m_name == "embeddings":
                trace.append('''embeddings''' )
                pointer = getattr(pointer , '''embeddings''' )
                if layer_num == 0:
                    trace.append('''word_embeddings''' )
                    pointer = getattr(pointer , '''word_embeddings''' )
                elif layer_num == 1:
                    trace.append('''position_embeddings''' )
                    pointer = getattr(pointer , '''position_embeddings''' )
                elif layer_num == 2:
                    trace.append('''token_type_embeddings''' )
                    pointer = getattr(pointer , '''token_type_embeddings''' )
                else:
                    raise ValueError(F'''Unknown embedding layer with name {full_name}''' )
                trace.append('''weight''' )
                pointer = getattr(pointer , '''weight''' )
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(['''attention''', '''self'''] )
                pointer = getattr(pointer , '''attention''' )
                pointer = getattr(pointer , '''self''' )
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(['''attention''', '''output''', '''LayerNorm'''] )
                pointer = getattr(pointer , '''attention''' )
                pointer = getattr(pointer , '''output''' )
                pointer = getattr(pointer , '''LayerNorm''' )
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(['''attention''', '''output''', '''dense'''] )
                pointer = getattr(pointer , '''attention''' )
                pointer = getattr(pointer , '''output''' )
                pointer = getattr(pointer , '''dense''' )
            elif m_name == "_output_dense":
                # output dense
                trace.extend(['''output''', '''dense'''] )
                pointer = getattr(pointer , '''output''' )
                pointer = getattr(pointer , '''dense''' )
            elif m_name == "_output_layer_norm":
                # output LayerNorm
                trace.extend(['''output''', '''LayerNorm'''] )
                pointer = getattr(pointer , '''output''' )
                pointer = getattr(pointer , '''LayerNorm''' )
            elif m_name == "_key_dense":
                # attention key
                trace.append('''key''' )
                pointer = getattr(pointer , '''key''' )
            elif m_name == "_query_dense":
                # attention query
                trace.append('''query''' )
                pointer = getattr(pointer , '''query''' )
            elif m_name == "_value_dense":
                # attention value
                trace.append('''value''' )
                pointer = getattr(pointer , '''value''' )
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(['''intermediate''', '''dense'''] )
                pointer = getattr(pointer , '''intermediate''' )
                pointer = getattr(pointer , '''dense''' )
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append('''bias''' )
                pointer = getattr(pointer , '''bias''' )
            elif m_name in ["kernel", "gamma"]:
                trace.append('''weight''' )
                pointer = getattr(pointer , '''weight''' )
            else:
                logger.warning(F'''Ignored {m_name}''' )
        # for certain layers reshape is necessary
        trace = '''.'''.join(trace )
        if re.match(r'''(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)''' , trace ) or re.match(
            r'''(\S+)\.attention\.output\.dense\.weight''' , trace ):
            array = array.reshape(pointer.data.shape )
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array )
        else:
            raise ValueError(
                F'''Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:'''
                F''' {array.shape}''' )
        logger.info(F'''Successfully set variable {full_name} to PyTorch layer {trace}''' )
return model
def convert_tfa_checkpoint_to_pytorch(tf_checkpoint_path , config_path , pytorch_dump_path ):
    '''simple docstring'''
    logger.info(F'''Loading model based on config from {config_path}...''' )
    config = BertConfig.from_json_file(config_path )
    model = BertModel(config )
    # Load weights from checkpoint
    logger.info(F'''Loading weights from checkpoint {tf_checkpoint_path}...''' )
    load_tfa_weights_in_bert(model , tf_checkpoint_path , config )
    # Save pytorch-model
    logger.info(F'''Saving PyTorch model to {pytorch_dump_path}...''' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow 2.x checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
type=str,
required=True,
help='''The config json file corresponding to the BERT model. This specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''',
type=str,
required=True,
help='''Path to the output PyTorch model (must include filename).''',
)
    args = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 716 |
"""simple docstring"""
from __future__ import annotations
def extended_euclid(a : int , b : int ) -> tuple[int, int]:
    '''simple docstring'''
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b , a % b )
    k = a // b
    return (y, x - k * y)
def chinese_remainder_theorem(na : int , ra : int , nb : int , rb : int ) -> int:
    '''simple docstring'''
    (x, y) = extended_euclid(na , nb )
    m = na * nb
    n = rb * x * na + ra * y * nb
    return (n % m + m) % m
def invert_modulo(a : int , n : int ) -> int:
    '''simple docstring'''
    (b, x) = extended_euclid(a , n )
    if b < 0:
        b = (b % n + n) % n
    return b
def chinese_remainder_theorem2(na : int , ra : int , nb : int , rb : int ) -> int:
    '''simple docstring'''
    x, y = invert_modulo(na , nb ), invert_modulo(nb , na )
    m = na * nb
    n = rb * x * na + ra * y * nb
    return (n % m + m) % m
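# Worked example (hand-checked): to find n with n % 5 == 1 and n % 7 == 3,
# extended_euclid(5, 7) returns (3, -2) since 5*3 + 7*(-2) == 1, so
# chinese_remainder_theorem(5, 1, 7, 3) == (3*3*5 + 1*(-2)*7) % 35 == 31,
# and indeed 31 % 5 == 1 and 31 % 7 == 3. chinese_remainder_theorem2 agrees.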
if __name__ == "__main__":
from doctest import testmod
testmod(name='''chinese_remainder_theorem''', verbose=True)
testmod(name='''chinese_remainder_theorem2''', verbose=True)
testmod(name='''invert_modulo''', verbose=True)
testmod(name='''extended_euclid''', verbose=True)
| 401 | 0 |
def ugly_numbers(n : int ):
    ugly_nums = [1]
    ia, ib, ic = 0, 0, 0
    next_a = ugly_nums[ia] * 2
    next_b = ugly_nums[ib] * 3
    next_c = ugly_nums[ic] * 5
    for _ in range(1 , n ):
        next_num = min(next_a , next_b , next_c )
        ugly_nums.append(next_num )
        if next_num == next_a:
            ia += 1
            next_a = ugly_nums[ia] * 2
        if next_num == next_b:
            ib += 1
            next_b = ugly_nums[ib] * 3
        if next_num == next_c:
            ic += 1
            next_c = ugly_nums[ic] * 5
    return ugly_nums[-1]
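# Worked example: the 2-3-5-smooth sequence starts 1, 2, 3, 4, 5, 6, 8, 9, 10, 12,
# so ugly_numbers(10) == 12; the demo below prints the 200th ugly number.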
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f"""{ugly_numbers(200) = }""")
| 63 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson( function: str , starting_point: complex , variable: str = "x" , precision: float = 10**-10 , multiplicity: int = 1 , ) -> complex:
    '''simple docstring'''
    x = symbols(variable )
    func = lambdify(x , function )
    diff_function = lambdify(x , diff(function , variable ) )
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess ) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess ) / diff_function(
                prev_guess )
        else:
            raise ZeroDivisionError('''Could not find root''' ) from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess ) < precision:
            return next_guess
        prev_guess = next_guess
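# Worked example (hand-checked): newton_raphson("x**2 - 4", 3) iterates
# x -> x - (x**2 - 4) / (2*x): 3 -> 2.1666... -> 2.0064... and converges to 2
# once consecutive guesses differ by less than `precision`.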
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}""")
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
f"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
f"""{newton_raphson('exp(x) - 1', 1_0, precision=0.0_0_5)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
| 271 | 0 |
"""simple docstring"""
A = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
A = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
A = {
0: """Sunday""",
1: """Monday""",
2: """Tuesday""",
3: """Wednesday""",
4: """Thursday""",
5: """Friday""",
6: """Saturday""",
}
def get_week_day(year : int , month : int , day : int ) -> str:
"""simple docstring"""
assert len(str(lowerCamelCase ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 12, "month should be between 1 to 12"
assert 1 <= day <= 31, "day should be between 1 to 31"
# Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
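# Worked example (hand-checked): for 2020-10-24, century_anchor == 2,
# dooms_day == (1 + 8 + 2 + 2) % 7 == 6, day_anchor == DOOMSDAY_LEAP[9] == 3,
# so the week day is (6 + 24 - 3) % 7 == 6 -> "Saturday".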
if __name__ == "__main__":
import doctest
doctest.testmod()
| 147 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""

    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': TFMobileBertModel,
            'fill-mask': TFMobileBertForMaskedLM,
            'question-answering': TFMobileBertForQuestionAnswering,
            'text-classification': TFMobileBertForSequenceClassification,
            'token-classification': TFMobileBertForTokenClassification,
            'zero-shot': TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def _prepare_for_class(self , inputs_dict , model_class , return_labels=False ) -> Tuple:
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
        return inputs_dict
    class TFMobileBertModelTester(object ):
        def __init__(
            self ,
            parent ,
            batch_size=13 ,
            seq_length=7 ,
            is_training=True ,
            use_input_mask=True ,
            use_token_type_ids=True ,
            use_labels=True ,
            vocab_size=99 ,
            hidden_size=32 ,
            embedding_size=32 ,
            num_hidden_layers=2 ,
            num_attention_heads=4 ,
            intermediate_size=37 ,
            hidden_act="gelu" ,
            hidden_dropout_prob=0.1 ,
            attention_probs_dropout_prob=0.1 ,
            max_position_embeddings=512 ,
            type_vocab_size=16 ,
            type_sequence_label_size=2 ,
            initializer_range=0.02 ,
            num_labels=3 ,
            num_choices=4 ,
            scope=None ,
        ) -> int:
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size

        def prepare_config_and_inputs(self ) -> List[str]:
            input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length] )
            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
                token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
                choice_labels = ids_tensor([self.batch_size] , self.num_choices )
            config = MobileBertConfig(
                vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

        def create_and_check_mobilebert_model(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> str:
            model = TFMobileBertModel(config=config )
            inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
            result = model(inputs )
            inputs = [input_ids, input_mask]
            result = model(inputs )
            result = model(input_ids )
            self.parent.assertEqual(
                result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
            self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

        def create_and_check_mobilebert_for_masked_lm(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> str:
            model = TFMobileBertForMaskedLM(config=config )
            inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
            result = model(inputs )
            self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

        def create_and_check_mobilebert_for_next_sequence_prediction(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Optional[Any]:
            model = TFMobileBertForNextSentencePrediction(config=config )
            inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
            result = model(inputs )
            self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )

        def create_and_check_mobilebert_for_pretraining(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> str:
            model = TFMobileBertForPreTraining(config=config )
            inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
            result = model(inputs )
            self.parent.assertEqual(
                result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
            self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )

        def create_and_check_mobilebert_for_sequence_classification(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Optional[int]:
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config )
            inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
            result = model(inputs )
            self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

        def create_and_check_mobilebert_for_multiple_choice(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> int:
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config )
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
            inputs = {
                'input_ids': multiple_choice_inputs_ids,
                'attention_mask': multiple_choice_input_mask,
                'token_type_ids': multiple_choice_token_type_ids,
            }
            result = model(inputs )
            self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

        def create_and_check_mobilebert_for_token_classification(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> List[Any]:
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config )
            inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
            result = model(inputs )
            self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

        def create_and_check_mobilebert_for_question_answering(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> List[Any]:
            model = TFMobileBertForQuestionAnswering(config=config )
            inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
            result = model(inputs )
            self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
            self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

        def prepare_config_and_inputs_for_common(self ) -> Union[str, Any]:
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
            return config, inputs_dict
    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 147 | 1 |
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        # MGP-STR tokenizes character by character.
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
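
# A minimal usage sketch (the toy vocab below is made up for illustration and is
# not part of the original file; exact base-class behavior may vary by version):
if __name__ == "__main__":
    import tempfile

    tmp = tempfile.mkdtemp()
    vocab_path = os.path.join(tmp, "vocab.json")
    with open(vocab_path, "w", encoding="utf-8") as f:
        json.dump({"[GO]": 0, "[s]": 1, "a": 2, "b": 3}, f)
    tokenizer = MgpstrTokenizer(vocab_path)
    # Character-level tokenization: every character is its own token.
    assert tokenizer._tokenize("ab") == ["a", "b"]
    assert [tokenizer._convert_token_to_id(t) for t in "ab"] == [2, 3]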
| 230 |
import copy
from typing import List

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}
class AlignTextConfig(PretrainedConfig):
    model_type = "align_text_model"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AlignVisionConfig(PretrainedConfig):
    model_type = "align_vision_model"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AlignConfig(PretrainedConfig):
    model_type = "align"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=640,
        temperature_init_value=1.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
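
# A minimal composition sketch (illustrative only, not part of the original file;
# it simply exercises the classmethods defined above):
if __name__ == "__main__":
    text_config = AlignTextConfig(vocab_size=1000, hidden_size=64)
    vision_config = AlignVisionConfig(image_size=224)
    config = AlignConfig.from_text_vision_configs(text_config, vision_config, projection_dim=128)
    assert config.text_config.vocab_size == 1000
    assert config.projection_dim == 128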
| 230 | 1 |
import warnings
from pathlib import Path
from typing import List, Tuple, Union

import fire
from torch import nn

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging


logger = logging.get_logger(__name__)


def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())


LAYERS_TO_COPY = {
    # maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
    # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
    12: {
        1: [0],  # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
        2: [0, 6],
        3: [0, 6, 11],
        4: [0, 4, 8, 11],
        6: [0, 2, 4, 7, 9, 11],
        9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
        12: list(range(12)),
    },
    16: {  # maps num layers in student -> which teacher layers to copy
        1: [0],
        2: [0, 15],
        3: [0, 8, 15],
        4: [0, 5, 10, 15],
        6: [0, 3, 6, 9, 12, 15],
        8: [0, 2, 4, 6, 8, 10, 12, 15],
        9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
        12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
        16: list(range(16)),
    },
    6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
    # maps num layers in student -> which teacher layers to copy.
    6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
    12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
    16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}


def pick_layers_to_copy(n_student, n_teacher):
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))


def get_layers_to_supervise(n_student, n_teacher) -> List[int]:
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
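
# Illustration (values read directly from the lookup tables above):
#   pick_layers_to_copy(n_student=3, n_teacher=12)   -> [0, 6, 11]
#   get_layers_to_supervise(n_student=2, n_teacher=6) -> [3, 5]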
def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
) -> Tuple[PreTrainedModel, List[int], List[int]]:
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher keys.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy: List[int] = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy: List[int] = pick_layers_to_copy(d, teacher_d)

    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    # Save information about copying for easier reproducibility
    return student, e_layers_to_copy, d_layers_to_copy


if __name__ == "__main__":
    fire.Fire(create_student_by_copying_alternating_layers)
| 392 |
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}


class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
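
# A small sanity check (illustrative, not part of the original file): with the
# defaults above, feature_size is input_size * len(lags_sequence) plus the two
# loc/scale features, i.e. 1 * 7 + 2 == 9.
if __name__ == "__main__":
    config = TimeSeriesTransformerConfig(prediction_length=24)
    assert config.feature_size == 9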
| 392 | 1 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field

import torch

from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging


logger = logging.get_logger(__name__)


def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
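
# Illustrative note (the values below are made-up examples, not from the original
# module): on a SageMaker model-parallel job the launcher sets something like
#   SM_HP_MP_PARAMETERS='{"partitions": 2}'
#   SM_FRAMEWORK_PARAMS='{"sagemaker_mpi_enabled": true}'
# and the check above additionally requires the `smdistributed` package to be importable.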
if is_sagemaker_model_parallel_available():
    import smdistributed.modelparallel.torch as smp

    smp.init()


@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )

    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch`"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device

    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()
        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
 | 57 |
import argparse

import torch
from torch import nn

from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"]
    state_dict = m2m_100["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    tie_embeds = args.share_decoder_input_output_embed

    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = Speech2TextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=200,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )

    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
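
# Hypothetical invocation (both paths are placeholders, not from the original script):
#   python convert_s2t_fairseq_to_tfms.py --fairseq_path /path/to/s2t_checkpoint.pt \
#       --pytorch_dump_folder_path ./s2t-converted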
| 136 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}


class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
 | 697 |
import numpy as np
from PIL import Image


def maxpooling(arr, size, stride) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


def avgpooling(arr, size, stride) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
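
# A minimal worked example (added for illustration; the values follow directly
# from the definitions above): a 4x4 input with size=2, stride=2 yields 2x2 output.
if __name__ == "__main__":
    _demo = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
    assert maxpooling(_demo, size=2, stride=2).tolist() == [[6.0, 8.0], [14.0, 16.0]]
    assert avgpooling(_demo, size=2, stride=2).tolist() == [[3.0, 5.0], [11.0, 13.0]]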
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
    # Loading the image
    image = Image.open("path_to_image")

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
 | 697 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_longformer": [
"LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"LongformerConfig",
"LongformerOnnxConfig",
],
"tokenization_longformer": ["LongformerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : str = ["LongformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longformer"] = [
"LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"LongformerForMaskedLM",
"LongformerForMultipleChoice",
"LongformerForQuestionAnswering",
"LongformerForSequenceClassification",
"LongformerForTokenClassification",
"LongformerModel",
"LongformerPreTrainedModel",
"LongformerSelfAttention",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_longformer"] = [
"TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLongformerForMaskedLM",
"TFLongformerForMultipleChoice",
"TFLongformerForQuestionAnswering",
"TFLongformerForSequenceClassification",
"TFLongformerForTokenClassification",
"TFLongformerModel",
"TFLongformerPreTrainedModel",
"TFLongformerSelfAttention",
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 550 |
import os
from distutils.util import strtobool


def get_int_from_env(env_keys, default):
    """Returns the first positive env value found in the `env_keys` list or the default."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
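
# Small demo (illustrative; the env var names below are made up):
if __name__ == "__main__":
    os.environ["DEMO_FLAG"] = "yes"
    assert parse_flag_from_env("DEMO_FLAG") is True
    assert get_int_from_env(["UNSET_A", "UNSET_B"], default=7) == 7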
| 550 | 1 |
import argparse
import json
import os

import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile

from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
    rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int


def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

    return flax_key_tuple, flax_tensor


def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content


def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            ".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin"
        )  # len(sharded_state_dicts):05d}
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--max_shard_size""", default="""10GB""", required=False, help="""Max shard size""")
parser.add_argument("""--dtype""", default="""bfloat16""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
__a = parser.parse_args()
shard_on_the_fly(
args.switch_tax_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def _UpperCamelCase ( ) ->Union[str, Any]:
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
UpperCAmelCase = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""" )
config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""" )
UpperCAmelCase = SwitchTransformersForConditionalGeneration.from_pretrained(
"""/home/arthur_huggingface_co/transformers/switch_converted""" , device_map="""auto""" )
UpperCAmelCase = TaTokenizer.from_pretrained("""t5-small""" )
UpperCAmelCase = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."""
UpperCAmelCase = tokenizer(lowerCAmelCase_ , return_tensors="""pt""" ).input_ids
UpperCAmelCase = model.generate(lowerCAmelCase_ , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
| 627 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # Prepares a list of PIL images from random uint8 arrays.
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])

    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
| 627 | 1 |
import sys
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in the 1000-digit number `n`."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(F"""{solution() = }""")
| 61 |
"""simple docstring"""
def pancake_sort(arr):
    '''Sort `arr` using prefix reversals (pancake flips) and return it.'''
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi, bringing the maximum to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the whole unsorted prefix, sinking the maximum to position cur - 1
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(pancake_sort(unsorted))
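# Worked example added for clarity (illustrative values): pancake_sort([3, 1, 2])
#   cur=3: max is arr[0]=3 -> flip prefix of 1 (no-op), flip prefix of 3 -> [2, 1, 3]
#   cur=2: max of [2, 1] is arr[0]=2 -> flip prefix of 1 (no-op), flip prefix of 2 -> [1, 2, 3]
assert pancake_sort([3, 1, 2]) == [1, 2, 3]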
| 115 | 0 |
from __future__ import annotations
from collections.abc import Iterable
from typing import Any
class Node:
    '''A binary search tree node.'''
    def __init__(self, value: int | None = None) -> None:
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None
    def __repr__(self) -> str:
        from pprint import pformat
        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f'{self.value}': (self.left, self.right)}, indent=1)
class BinarySearchTree:
    '''A binary search tree of `Node` objects.'''
    def __init__(self, root: Node | None = None) -> None:
        self.root = root
    def __str__(self) -> str:
        return str(self.root)
    def __reassign_nodes(self, node: Node, new_children: Node | None) -> None:
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children
    def is_right(self, node: Node) -> bool:
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False
    def empty(self) -> bool:
        return self.root is None
    def __insert(self, value) -> None:
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node
    def insert(self, *values) -> None:
        for value in values:
            self.__insert(value)
    def search(self, value) -> Node | None:
        if self.empty():
            raise IndexError("Warning: Tree is empty! Please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node
    def get_max(self, node: Node | None = None) -> Node | None:
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node
    def get_min(self, node: Node | None = None) -> Node | None:
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.left is not None:
                node = node.left
        return node
    def remove(self, value: int) -> None:
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(
                    node.left
                )  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure
    def preorder_traverse(self, node: Node | None) -> Iterable:
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)
    def traversal_tree(self, traversal_function=None) -> Any:
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)
    def inorder(self, arr: list, node: Node | None) -> None:
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)
    def find_kth_smallest(self, k: int, node: Node) -> int:
        arr: list[int] = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]
def postorder(curr_node):
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list
def binary_search_tree_example() -> None:
    testlist = (8, 3, 6, 1, 1_0, 1_4, 1_3, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)
    # Prints all the elements of the list in order traversal
    print(t)
    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")
    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")
    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore
    for i in testlist:
        t.remove(i)
    print(t)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
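# Usage sketch for find_kth_smallest (illustrative values, not part of the demo):
#   t = BinarySearchTree()
#   t.insert(8, 3, 10)               # inorder sequence: [3, 8, 10]
#   t.find_kth_smallest(2, t.root)   # -> 8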
| 203 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class LDMTextToImagePipelineFastTests(PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = LDMTextToImagePipeline
__UpperCAmelCase : List[Any] = TEXT_TO_IMAGE_PARAMS - {
"negative_prompt",
"negative_prompt_embeds",
"cross_attention_kwargs",
"prompt_embeds",
}
__UpperCAmelCase : int = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"callback",
"callback_steps",
}
__UpperCAmelCase : Any = TEXT_TO_IMAGE_BATCH_PARAMS
__UpperCAmelCase : int = False
    def get_dummy_components(self):
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vqvae": vae,
            "bert": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_inference_text2img(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.61_01, 0.61_56, 0.56_22, 0.48_95, 0.66_61, 0.38_04, 0.57_48, 0.61_36, 0.50_14] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class LDMTextToImagePipelineSlowTests(unittest.TestCase ):
"""simple docstring"""
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case ( self : Optional[int] , lowerCamelCase : Dict , lowerCamelCase : Any=torch.floataa , lowerCamelCase : List[Any]=0 ) -> Dict:
__snake_case : List[Any] = torch.manual_seed(lowerCamelCase )
__snake_case : Tuple = np.random.RandomState(lowerCamelCase ).standard_normal((1, 4, 32, 32) )
__snake_case : int = torch.from_numpy(lowerCamelCase ).to(device=lowerCamelCase , dtype=lowerCamelCase )
__snake_case : List[str] = {
"prompt": "A painting of a squirrel eating a burger",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def __snake_case ( self : Tuple ) -> Optional[Any]:
__snake_case : str = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256" ).to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
__snake_case : List[str] = self.get_inputs(lowerCamelCase )
__snake_case : str = pipe(**lowerCamelCase ).images
__snake_case : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
__snake_case : int = np.array([0.5_18_25, 0.5_28_50, 0.5_25_43, 0.5_42_58, 0.5_23_04, 0.5_25_69, 0.5_43_63, 0.5_52_76, 0.5_68_78] )
__snake_case : List[str] = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1E-3
@nightly
@require_torch_gpu
class LDMTextToImagePipelineNightlyTests(unittest.TestCase ):
"""simple docstring"""
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case ( self : Any , lowerCamelCase : Dict , lowerCamelCase : Any=torch.floataa , lowerCamelCase : List[Any]=0 ) -> Optional[Any]:
__snake_case : int = torch.manual_seed(lowerCamelCase )
__snake_case : Tuple = np.random.RandomState(lowerCamelCase ).standard_normal((1, 4, 32, 32) )
__snake_case : Optional[Any] = torch.from_numpy(lowerCamelCase ).to(device=lowerCamelCase , dtype=lowerCamelCase )
__snake_case : List[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"latents": latents,
"generator": generator,
"num_inference_steps": 50,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def __snake_case ( self : List[str] ) -> int:
__snake_case : Union[str, Any] = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256" ).to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
__snake_case : Optional[Any] = self.get_inputs(lowerCamelCase )
__snake_case : str = pipe(**lowerCamelCase ).images[0]
__snake_case : Tuple = load_numpy(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy" )
__snake_case : str = np.abs(expected_image - image ).max()
assert max_diff < 1E-3
| 203 | 1 |
import os
import zipfile
import pytest
from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)
from .utils import require_lz4, require_py7zr, require_zstandard
@pytest.mark.parametrize(
'compression_format, is_archive' , [
('7z', True),
('bz2', False),
('gzip', False),
('lz4', False),
('tar', True),
('xz', False),
('zip', True),
('zstd', False),
] , )
def test_base_extractors(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    text_file,
    tmp_path,
):
    input_paths_and_base_extractors = {
        '7z': (seven_zip_file, SevenZipExtractor),
        'bz2': (bz2_file, Bzip2Extractor),
        'gzip': (gz_file, GzipExtractor),
        'lz4': (lz4_file, Lz4Extractor),
        'tar': (tar_file, TarExtractor),
        'xz': (xz_file, XzExtractor),
        'zip': (zip_file, ZipExtractor),
        'zstd': (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ('extracted' if is_archive else 'extracted.txt')
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding='utf-8')
    else:
        extracted_file_content = output_path.read_text(encoding='utf-8')
    expected_file_content = text_file.read_text(encoding='utf-8')
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'compression_format, is_archive' , [
('7z', True),
('bz2', False),
('gzip', False),
('lz4', False),
('tar', True),
('xz', False),
('zip', True),
('zstd', False),
] , )
def test_extractor(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    text_file,
    tmp_path,
):
    input_paths = {
        '7z': seven_zip_file,
        'bz2': bz2_file,
        'gzip': gz_file,
        'lz4': lz4_file,
        'tar': tar_file,
        'xz': xz_file,
        'zip': zip_file,
        'zstd': zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ('extracted' if is_archive else 'extracted.txt')
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding='utf-8')
    else:
        extracted_file_content = output_path.read_text(encoding='utf-8')
    expected_file_content = text_file.read_text(encoding='utf-8')
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile
    directory = tmp_path / 'data_dot_dot'
    directory.mkdir()
    path = directory / 'tar_file_with_dot_dot.tar'
    with tarfile.TarFile(path, 'w' ) as f:
        f.add(text_file, arcname=os.path.join('..', text_file.name ) )
    return path
@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile
    directory = tmp_path / 'data_sym_link'
    directory.mkdir()
    path = directory / 'tar_file_with_sym_link.tar'
    os.symlink('..', directory / 'subdir', target_is_directory=True )
    with tarfile.TarFile(path, 'w' ) as f:
        f.add(str(directory / 'subdir' ), arcname='subdir' )  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
'insecure_tar_file, error_log' , [('tar_file_with_dot_dot', 'illegal path'), ('tar_file_with_sym_link', 'Symlink')] , )
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    insecure_tar_files = {
        'tar_file_with_dot_dot': tar_file_with_dot_dot,
        'tar_file_with_sym_link': tar_file_with_sym_link,
    }
    insecure_tar_file_path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / 'extracted'
    TarExtractor.extract(insecure_tar_file_path, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg
def test_is_zipfile_false_positive(tmpdir):
    # We should have less false positives than zipfile.is_zipfile
    # We do that by checking only the magic number
    not_a_zip_file = tmpdir / 'not_a_zip_file'
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00'
        b'\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I'
        b'DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07'
        b'\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82'
    )
    with not_a_zip_file.open('wb' ) as f:
        f.write(data )
    assert zipfile.is_zipfile(str(not_a_zip_file ) )  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file )  # but we're right
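# Sketch of the magic-number idea the test above relies on. A real ZIP stream
# starts with one of the "PK" signatures below (local file header, empty
# archive, spanned archive); `looks_like_zip` is a hypothetical helper written
# for illustration, not part of the datasets API.
ZIP_MAGIC_NUMBERS = (b"PK\x03\x04", b"PK\x05\x06", b"PK\x07\x08")
def looks_like_zip(path) -> bool:
    # Read only the first four bytes and compare against the known signatures.
    with open(path, "rb") as f:
        return f.read(4) in ZIP_MAGIC_NUMBERS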
| 66 |
'''simple docstring'''
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((5_15, 22, 13), 5_55), ((61, 35, 49), 1_50))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.0_09
def _error(example_no, data_set="train"):
    '''Difference between the predicted and actual output for an example.'''
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set )
def _hypothesis_value(data_input_tuple):
    '''Hypothesis value: dot product of parameters and inputs, plus the bias term.'''
    hyp_val = 0
    for i in range(len(parameter_vector) - 1 ):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val
def output(example_no, data_set):
    '''Actual output value for an example from the chosen data set.'''
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def calculate_hypothesis_value(example_no, data_set):
    '''Hypothesis value for an example from the chosen data set.'''
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0] )
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0] )
    return None
def summation_of_cost_derivative(index, end=m):
    '''Sum of error terms, weighted by the input feature unless index == -1 (bias).'''
    summation_value = 0
    for i in range(end ):
        if index == -1:
            summation_value += _error(i )
        else:
            summation_value += _error(i ) * train_data[i][0][index]
    return summation_value
def get_cost_derivative(index):
    '''Cost derivative for one parameter: the summed error term divided by m.'''
    cost_derivative_value = summation_of_cost_derivative(index, m ) / m
    return cost_derivative_value
def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.00_00_02
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector ) ):
            cost_derivative = get_cost_derivative(i - 1 )
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector, temp_parameter_vector, atol=absolute_error_limit, rtol=relative_error_limit, ):
            break
        parameter_vector = temp_parameter_vector
    print(('''Number of iterations:''', j) )
def test_gradient_descent():
    for i in range(len(test_data ) ):
        print(('''Actual output value:''', output(i, '''test''' )) )
        print(('''Hypothesis output:''', calculate_hypothesis_value(i, '''test''' )) )
if __name__ == "__main__":
run_gradient_descent()
print('\nTesting gradient descent for a linear hypothesis function.\n')
test_gradient_descent()
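# Reference for the update rule implemented above: batch gradient descent on a
# (1/m)-scaled squared-error cost,
#   parameter_vector[j] <- parameter_vector[j] - LEARNING_RATE * (1/m) * sum_i error(i) * x[i][j - 1]
# where j == 0 is the bias term, handled via index == -1 with an implicit
# constant input of 1.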
| 634 | 0 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=1_0,
        num_channels=3,
        min_size=3_2 * 8,
        max_size=3_2 * 8,
        num_labels=4,
        hidden_dim=6_4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device)
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size] , device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels) , device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config(self):
"""simple docstring"""
SCREAMING_SNAKE_CASE = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
SCREAMING_SNAKE_CASE = self.num_queries
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = [1, 1, 1, 1]
SCREAMING_SNAKE_CASE = self.num_channels
SCREAMING_SNAKE_CASE = 6_4
SCREAMING_SNAKE_CASE = 1_2_8
SCREAMING_SNAKE_CASE = self.hidden_dim
SCREAMING_SNAKE_CASE = self.hidden_dim
SCREAMING_SNAKE_CASE = self.hidden_dim
return config
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(encoder_hidden_states) , len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states) , len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states) , config.decoder_layers)
    def create_and_check_maskaformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskaFormerModel(config=config)
            model.to(torch_device)
            model.eval()
            output = model(pixel_values=pixel_values , pixel_mask=pixel_mask)
            output = model(pixel_values , output_hidden_states=True)
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)
        if output_hidden_states:
            self.check_output_hidden_state(output , config)
    def create_and_check_maskaformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskaFormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1))
        with torch.no_grad():
            result = model(pixel_values=pixel_values , pixel_mask=pixel_mask)
            result = model(pixel_values)
            comm_check_on_output(result)
            result = model(
                pixel_values=pixel_values , pixel_mask=pixel_mask , mask_labels=mask_labels , class_labels=class_labels)
            comm_check_on_output(result)
        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape , torch.Size([1]))
@require_torch
class MaskaFormerModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
__snake_case : Any =(MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
__snake_case : List[Any] ={"""feature-extraction""": MaskaFormerModel} if is_torch_available() else {}
__snake_case : List[Any] =False
__snake_case : Optional[int] =False
__snake_case : int =False
__snake_case : Union[str, Any] =False
    def setUp(self):
        self.model_tester = MaskaFormerModelTester(self)
        self.config_tester = ConfigTester(self , config_class=MaskaFormerConfig , has_text_modality=False)
def __UpperCamelCase ( self : Optional[Any]) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Any) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(snake_case_ , **snake_case_ , output_hidden_states=snake_case_)
def __UpperCamelCase ( self : Any) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*snake_case_)
@unittest.skip(reason='Mask2Former does not use inputs_embeds')
def __UpperCamelCase ( self : int) -> str:
"""simple docstring"""
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method')
def __UpperCamelCase ( self : List[Any]) -> int:
"""simple docstring"""
pass
@unittest.skip(reason='Mask2Former is not a generative model')
def __UpperCamelCase ( self : str) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason='Mask2Former does not use token embeddings')
def __UpperCamelCase ( self : int) -> Union[str, Any]:
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`')
def __UpperCamelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def __UpperCamelCase ( self : str) -> Tuple:
"""simple docstring"""
pass
def __UpperCamelCase ( self : List[str]) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(snake_case_)
SCREAMING_SNAKE_CASE = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case_)
@slow
def __UpperCamelCase ( self : Optional[int]) -> Dict:
"""simple docstring"""
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
SCREAMING_SNAKE_CASE = MaskaFormerModel.from_pretrained(snake_case_)
self.assertIsNotNone(snake_case_)
def __UpperCamelCase ( self : str) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE = (self.model_tester.min_size,) * 2
SCREAMING_SNAKE_CASE = {
'pixel_values': torch.randn((2, 3, *size) , device=snake_case_),
'mask_labels': torch.randn((2, 1_0, *size) , device=snake_case_),
'class_labels': torch.zeros(2 , 1_0 , device=snake_case_).long(),
}
SCREAMING_SNAKE_CASE = self.model_tester.get_config()
SCREAMING_SNAKE_CASE = MaskaFormerForUniversalSegmentation(snake_case_).to(snake_case_)
SCREAMING_SNAKE_CASE = model(**snake_case_)
self.assertTrue(outputs.loss is not None)
def __UpperCamelCase ( self : List[str]) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(snake_case_ , **snake_case_ , output_hidden_states=snake_case_)
def __UpperCamelCase ( self : List[Any]) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(snake_case_).to(snake_case_)
SCREAMING_SNAKE_CASE = model(**snake_case_ , output_attentions=snake_case_)
self.assertTrue(outputs.attentions is not None)
def __UpperCamelCase ( self : Optional[Any]) -> Dict:
"""simple docstring"""
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE = self.all_model_classes[1]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE = model_class(snake_case_)
model.to(snake_case_)
model.train()
SCREAMING_SNAKE_CASE = model(snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_).loss
loss.backward()
def __UpperCamelCase ( self : Optional[int]) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.all_model_classes[1]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = model_class(snake_case_).to(snake_case_)
model.train()
SCREAMING_SNAKE_CASE = model(snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_)
SCREAMING_SNAKE_CASE = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=snake_case_)
self.assertIsNotNone(encoder_hidden_states.grad)
self.assertIsNotNone(pixel_decoder_hidden_states.grad)
self.assertIsNotNone(transformer_decoder_hidden_states.grad)
self.assertIsNotNone(attentions.grad)
__UpperCAmelCase = 1e-4
def A_ ( ) ->List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class MaskaFormerModelIntegrationTest(unittest.TestCase ):
"""simple docstring"""
@cached_property
    def model_checkpoints(self):
"""simple docstring"""
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def __UpperCamelCase ( self : Dict) -> Dict:
"""simple docstring"""
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None
def __UpperCamelCase ( self : List[Any]) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE = MaskaFormerModel.from_pretrained(self.model_checkpoints).to(snake_case_)
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(snake_case_ , return_tensors='pt').to(snake_case_)
SCREAMING_SNAKE_CASE = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0)
# check size
self.assertEqual(snake_case_ , (1, 3, 3_8_4, 3_8_4))
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**snake_case_)
SCREAMING_SNAKE_CASE = torch.tensor(
[[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]]).to(snake_case_)
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , snake_case_ , atol=snake_case_))
SCREAMING_SNAKE_CASE = torch.tensor(
[[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]]).to(snake_case_)
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , snake_case_ , atol=snake_case_))
SCREAMING_SNAKE_CASE = torch.tensor(
[[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]]).to(snake_case_)
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , snake_case_ , atol=snake_case_))
def __UpperCamelCase ( self : int) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(snake_case_).eval()
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(snake_case_ , return_tensors='pt').to(snake_case_)
SCREAMING_SNAKE_CASE = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0)
# check size
self.assertEqual(snake_case_ , (1, 3, 3_8_4, 3_8_4))
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**snake_case_)
# masks_queries_logits
SCREAMING_SNAKE_CASE = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4))
SCREAMING_SNAKE_CASE = [
[-8.78_39, -9.00_56, -8.81_21],
[-7.41_04, -7.03_13, -6.54_01],
[-6.61_05, -6.34_27, -6.46_75],
]
SCREAMING_SNAKE_CASE = torch.tensor(snake_case_).to(snake_case_)
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , snake_case_ , atol=snake_case_))
# class_queries_logits
SCREAMING_SNAKE_CASE = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1))
SCREAMING_SNAKE_CASE = torch.tensor(
[
[1.83_24, -8.08_35, -4.19_22],
[0.84_50, -9.00_50, -3.60_53],
[0.30_45, -7.72_93, -3.02_75],
]).to(snake_case_)
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case_ , atol=snake_case_))
def __UpperCamelCase ( self : List[Any]) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(snake_case_).eval()
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = image_processor(
[np.zeros((3, 8_0_0, 1_3_3_3)), np.zeros((3, 8_0_0, 1_3_3_3))] , segmentation_maps=[np.zeros((3_8_4, 3_8_4)).astype(np.floataa), np.zeros((3_8_4, 3_8_4)).astype(np.floataa)] , return_tensors='pt' , )
SCREAMING_SNAKE_CASE = inputs['pixel_values'].to(snake_case_)
SCREAMING_SNAKE_CASE = [el.to(snake_case_) for el in inputs['mask_labels']]
SCREAMING_SNAKE_CASE = [el.to(snake_case_) for el in inputs['class_labels']]
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**snake_case_)
self.assertTrue(outputs.loss is not None)
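# Shape summary for the integration checks above (read off the assertions, no
# new facts introduced):
#   pixel_values:          (1, 3, 384, 384)   padded so H and W are divisible by 32
#   masks_queries_logits:  (1, num_queries, 384 // 4, 384 // 4)   # //4 from encoder compression
#   class_queries_logits:  (1, num_queries, num_labels + 1)       # +1 for the null class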
| 720 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
__UpperCAmelCase = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase ):
"""simple docstring"""
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
@require_torch
    def test_small_model_pt(self):
"""simple docstring"""
        text_classifier = pipeline(
            task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='pt')
        outputs = text_classifier('This is great !')
        self.assertEqual(nested_simplify(outputs) , [{'label': 'LABEL_0', 'score': 0.5_04}])
        outputs = text_classifier('This is great !' , top_k=2)
        self.assertEqual(
            nested_simplify(outputs) , [{'label': 'LABEL_0', 'score': 0.5_04}, {'label': 'LABEL_1', 'score': 0.4_96}])
        outputs = text_classifier(['This is great !', 'This is bad'] , top_k=2)
        self.assertEqual(
            nested_simplify(outputs) , [
                [{'label': 'LABEL_0', 'score': 0.5_04}, {'label': 'LABEL_1', 'score': 0.4_96}],
                [{'label': 'LABEL_0', 'score': 0.5_04}, {'label': 'LABEL_1', 'score': 0.4_96}],
            ] , )
        outputs = text_classifier('This is great !' , top_k=1)
        self.assertEqual(nested_simplify(outputs) , [{'label': 'LABEL_0', 'score': 0.5_04}])
        # Legacy behavior
        outputs = text_classifier('This is great !' , return_all_scores=False)
        self.assertEqual(nested_simplify(outputs) , [{'label': 'LABEL_0', 'score': 0.5_04}])
        outputs = text_classifier('This is great !' , return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs) , [[{'label': 'LABEL_0', 'score': 0.5_04}, {'label': 'LABEL_1', 'score': 0.4_96}]])
        outputs = text_classifier(['This is great !', 'Something else'] , return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs) , [
                [{'label': 'LABEL_0', 'score': 0.5_04}, {'label': 'LABEL_1', 'score': 0.4_96}],
                [{'label': 'LABEL_0', 'score': 0.5_04}, {'label': 'LABEL_1', 'score': 0.4_96}],
            ] , )
        outputs = text_classifier(['This is great !', 'Something else'] , return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs) , [
                {'label': 'LABEL_0', 'score': 0.5_04},
                {'label': 'LABEL_0', 'score': 0.5_04},
            ] , )
@require_torch
    def test_accepts_torch_device(self):
"""simple docstring"""
import torch
        text_classifier = pipeline(
            task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='pt' , device=torch.device('cpu') , )
        outputs = text_classifier('This is great !')
        self.assertEqual(nested_simplify(outputs) , [{'label': 'LABEL_0', 'score': 0.5_04}])
@require_tf
    def test_small_model_tf(self):
"""simple docstring"""
        text_classifier = pipeline(
            task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='tf')
        outputs = text_classifier('This is great !')
        self.assertEqual(nested_simplify(outputs) , [{'label': 'LABEL_0', 'score': 0.5_04}])
@slow
@require_torch
    def test_pt_defaults(self):
"""simple docstring"""
        text_classifier = pipeline('text-classification')
        outputs = text_classifier('This is great !')
        self.assertEqual(nested_simplify(outputs) , [{'label': 'POSITIVE', 'score': 1.0}])
        outputs = text_classifier('This is bad !')
        self.assertEqual(nested_simplify(outputs) , [{'label': 'NEGATIVE', 'score': 1.0}])
        outputs = text_classifier('Birds are a type of animal')
        self.assertEqual(nested_simplify(outputs) , [{'label': 'POSITIVE', 'score': 0.9_88}])
@slow
@require_tf
    def test_tf_defaults(self):
"""simple docstring"""
        text_classifier = pipeline('text-classification' , framework='tf')
        outputs = text_classifier('This is great !')
        self.assertEqual(nested_simplify(outputs) , [{'label': 'POSITIVE', 'score': 1.0}])
        outputs = text_classifier('This is bad !')
        self.assertEqual(nested_simplify(outputs) , [{'label': 'NEGATIVE', 'score': 1.0}])
        outputs = text_classifier('Birds are a type of animal')
        self.assertEqual(nested_simplify(outputs) , [{'label': 'POSITIVE', 'score': 0.9_88}])
    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model , tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]
    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = 'HuggingFace is in'
        outputs = text_classifier(valid_inputs)
        self.assertEqual(nested_simplify(outputs) , [{'label': ANY(str), 'score': ANY(float)}])
        self.assertTrue(outputs[0]['label'] in model.config.id2label.values())
        valid_inputs = ['HuggingFace is in ', 'Paris is in France']
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs) , [{'label': ANY(str), 'score': ANY(float)}, {'label': ANY(str), 'score': ANY(float)}] , )
        self.assertTrue(outputs[0]['label'] in model.config.id2label.values())
        self.assertTrue(outputs[1]['label'] in model.config.id2label.values())
        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs , top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs) , [[{'label': ANY(str), 'score': ANY(float)}] * N, [{'label': ANY(str), 'score': ANY(float)}] * N] , )
        valid_inputs = {'text': 'HuggingFace is in ', 'text_pair': 'Paris is in France'}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs) , {'label': ANY(str), 'score': ANY(float)} , )
        self.assertTrue(outputs['label'] in model.config.id2label.values())
        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_inputs = [['HuggingFace is in ', 'Paris is in France']]
        with self.assertRaises(ValueError):
            text_classifier(invalid_inputs)
        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[['HuggingFace is in ', 'Paris is in France']]])
        self.assertEqual(
            nested_simplify(outputs) , [{'label': ANY(str), 'score': ANY(float)}] , )
        self.assertTrue(outputs[0]['label'] in model.config.id2label.values())
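# Summary of the top_k semantics exercised above (a clarifying sketch):
#   text_classifier("x")              -> [{label, score}]         best label only
#   text_classifier("x", top_k=k)     -> the k best {label, score} entries
#   text_classifier("x", top_k=None)  -> one {label, score} per class
# return_all_scores=True/False is the older, legacy spelling of the same idea.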
| 259 | 0 |
"""simple docstring"""
import argparse
from t5x import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM
def convert_tax_checkpoint_to_flax(tax_checkpoint_path, config_name, flax_dump_folder_path):
    '''Convert a T5X checkpoint to a Flax model and save it to `flax_dump_folder_path`.'''
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    tax_model = checkpoints.load_t5x_checkpoint(tax_checkpoint_path)
    split_mlp_wi = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
if config.model_type == "t5":
__lowerCAmelCase = "SelfAttention"
if config.model_type == "longt5" and config.encoder_attention_type == "local":
__lowerCAmelCase = "LocalSelfAttention"
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__lowerCAmelCase = "TransientGlobalSelfAttention"
else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5'` with `encoder_attention_type`"
            " attribute with a value from ['local', 'transient-global']." )
# Encoder
for layer_index in range(config.num_layers ):
__lowerCAmelCase = f"layers_{str(__SCREAMING_SNAKE_CASE )}"
# Self-Attention
__lowerCAmelCase = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
__lowerCAmelCase = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
__lowerCAmelCase = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
__lowerCAmelCase = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__lowerCAmelCase = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
# Layer Normalization
__lowerCAmelCase = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
if split_mlp_wi:
__lowerCAmelCase = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
__lowerCAmelCase = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
__lowerCAmelCase = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
__lowerCAmelCase = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
__lowerCAmelCase = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
__lowerCAmelCase = flax_model.params["encoder"]["block"][str(__SCREAMING_SNAKE_CASE )]["layer"]
__lowerCAmelCase = tax_attention_key
__lowerCAmelCase = tax_attention_out
__lowerCAmelCase = tax_attention_query
__lowerCAmelCase = tax_attention_value
__lowerCAmelCase = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__lowerCAmelCase = tax_global_layer_norm
if split_mlp_wi:
__lowerCAmelCase = tax_mlp_wi_a
__lowerCAmelCase = tax_mlp_wi_a
else:
__lowerCAmelCase = tax_mlp_wi
__lowerCAmelCase = tax_mlp_wo
__lowerCAmelCase = tax_mlp_layer_norm
__lowerCAmelCase = flax_model_encoder_layer_block
# Only for layer 0:
__lowerCAmelCase = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
__lowerCAmelCase = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__lowerCAmelCase = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
__lowerCAmelCase = tax_encoder_global_rel_embedding
# Assigning
__lowerCAmelCase = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
__lowerCAmelCase = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
__lowerCAmelCase = f"layers_{str(__SCREAMING_SNAKE_CASE )}"
# Self-Attention
__lowerCAmelCase = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
__lowerCAmelCase = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
__lowerCAmelCase = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
__lowerCAmelCase = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
# Layer Normalization
__lowerCAmelCase = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
"scale"
]
# Encoder-Decoder-Attention
__lowerCAmelCase = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
__lowerCAmelCase = tax_enc_dec_attention_module["key"]["kernel"]
__lowerCAmelCase = tax_enc_dec_attention_module["out"]["kernel"]
__lowerCAmelCase = tax_enc_dec_attention_module["query"]["kernel"]
__lowerCAmelCase = tax_enc_dec_attention_module["value"]["kernel"]
# Layer Normalization
__lowerCAmelCase = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
# MLP
if split_mlp_wi:
__lowerCAmelCase = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
__lowerCAmelCase = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
__lowerCAmelCase = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
__lowerCAmelCase = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
__lowerCAmelCase = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
__lowerCAmelCase = flax_model.params["decoder"]["block"][str(__SCREAMING_SNAKE_CASE )]["layer"]
__lowerCAmelCase = tax_attention_key
__lowerCAmelCase = tax_attention_out
__lowerCAmelCase = tax_attention_query
__lowerCAmelCase = tax_attention_value
__lowerCAmelCase = tax_pre_attention_layer_norm
__lowerCAmelCase = tax_enc_dec_attention_key
__lowerCAmelCase = tax_enc_dec_attention_out
__lowerCAmelCase = tax_enc_dec_attention_query
__lowerCAmelCase = tax_enc_dec_attention_value
__lowerCAmelCase = tax_cross_layer_norm
if split_mlp_wi:
__lowerCAmelCase = tax_mlp_wi_a
__lowerCAmelCase = tax_mlp_wi_a
else:
__lowerCAmelCase = tax_mlp_wi
__lowerCAmelCase = tax_mlp_wo
__lowerCAmelCase = txa_mlp_layer_norm
__lowerCAmelCase = flax_model_decoder_layer_block
# Decoder Normalization
__lowerCAmelCase = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
__lowerCAmelCase = txa_decoder_norm
# Only for layer 0:
__lowerCAmelCase = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
__lowerCAmelCase = tax_decoder_rel_embedding
# Token Embeddings
__lowerCAmelCase = tax_model["target"]["token_embedder"]["embedding"]
__lowerCAmelCase = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
__lowerCAmelCase = tax_model["target"]["decoder"]["logits_dense"]["kernel"]
    flax_model.save_pretrained(flax_dump_folder_path )
    print("T5X Model was successfully converted!" )
if __name__ == "__main__":
A : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint."
)
parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
parser.add_argument(
"--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
)
A : Any = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
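# Example invocation (all paths, the script filename, and the config name are
# placeholders added for illustration, not taken from the file):
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --config_name google/long-t5-local-base \
#       --flax_dump_folder_path ./flax_dump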
| 636 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyVaaPriorPipelineFastTests(PipelineTesterMixin , unittest.TestCase):
"""simple docstring"""
lowerCAmelCase_ = KandinskyVaaPriorPipeline
lowerCAmelCase_ = ["""prompt"""]
lowerCAmelCase_ = ["""prompt""", """negative_prompt"""]
lowerCAmelCase_ = [
"""num_images_per_prompt""",
"""generator""",
"""num_inference_steps""",
"""latents""",
"""negative_prompt""",
"""guidance_scale""",
"""output_type""",
"""return_dict""",
]
lowerCAmelCase_ = False
@property
def UpperCamelCase__ ( self : List[str] ) -> str:
return 32
@property
def UpperCamelCase__ ( self : Tuple ) -> int:
return 32
@property
def UpperCamelCase__ ( self : Union[str, Any] ) -> int:
return self.time_input_dim
@property
def UpperCamelCase__ ( self : List[str] ) -> Tuple:
return self.time_input_dim * 4
@property
def UpperCamelCase__ ( self : Any ) -> Any:
return 100
@property
def UpperCamelCase__ ( self : Any ) -> Dict:
_UpperCamelCase =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def UpperCamelCase__ ( self : Tuple ) -> List[str]:
torch.manual_seed(0 )
_UpperCamelCase =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(UpperCamelCase__ )
@property
def UpperCamelCase__ ( self : int ) -> int:
torch.manual_seed(0 )
_UpperCamelCase ={
'''num_attention_heads''': 2,
'''attention_head_dim''': 12,
'''embedding_dim''': self.text_embedder_hidden_size,
'''num_layers''': 1,
}
_UpperCamelCase =PriorTransformer(**UpperCamelCase__ )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
_UpperCamelCase =nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def UpperCamelCase__ ( self : Dict ) -> int:
torch.manual_seed(0 )
_UpperCamelCase =CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
_UpperCamelCase =CLIPVisionModelWithProjection(UpperCamelCase__ )
return model
@property
def UpperCamelCase__ ( self : Dict ) -> Union[str, Any]:
_UpperCamelCase =CLIPImageProcessor(
crop_size=224 , do_center_crop=UpperCamelCase__ , do_normalize=UpperCamelCase__ , do_resize=UpperCamelCase__ , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
return image_processor
def UpperCamelCase__ ( self : int ) -> Any:
_UpperCamelCase =self.dummy_prior
_UpperCamelCase =self.dummy_image_encoder
_UpperCamelCase =self.dummy_text_encoder
_UpperCamelCase =self.dummy_tokenizer
_UpperCamelCase =self.dummy_image_processor
_UpperCamelCase =UnCLIPScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1000 , clip_sample=UpperCamelCase__ , clip_sample_range=10.0 , )
_UpperCamelCase ={
'''prior''': prior,
'''image_encoder''': image_encoder,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''scheduler''': scheduler,
'''image_processor''': image_processor,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
'''prompt''': '''horse''',
'''generator''': generator,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
    def test_kandinsky_prior( self ):
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        expected_slice = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
    @skip_mps
    def test_inference_batch_single_identical( self ):
        test_max_difference = torch_device == '''cpu'''
        relax_max_difference = True
        test_mean_pixel_difference = False
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , test_mean_pixel_difference=test_mean_pixel_difference , )
    @skip_mps
    def test_attention_slicing_forward_pass( self ):
        test_max_difference = torch_device == '''cpu'''
        test_mean_pixel_difference = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference , test_mean_pixel_difference=test_mean_pixel_difference , )
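    # Hedged usage sketch (added; not part of the original test). The dummy components
    # map one-to-one onto the pipeline constructor, so a manual run mirrors the test body:
    #   pipe = KandinskyVaaPriorPipeline(**self.get_dummy_components())
    #   embeds = pipe('''horse''', num_inference_steps=2, output_type='''np''').image_embeds  # shape (1, 32)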
| 404 | 0 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester :
'''simple docstring'''
def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=13 ,SCREAMING_SNAKE_CASE_=30 ,SCREAMING_SNAKE_CASE_=2 ,SCREAMING_SNAKE_CASE_=3 ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=32 ,SCREAMING_SNAKE_CASE_=2 ,SCREAMING_SNAKE_CASE_=4 ,SCREAMING_SNAKE_CASE_=37 ,SCREAMING_SNAKE_CASE_="gelu" ,SCREAMING_SNAKE_CASE_=0.1 ,SCREAMING_SNAKE_CASE_=0.1 ,SCREAMING_SNAKE_CASE_=10 ,SCREAMING_SNAKE_CASE_=0.02 ,SCREAMING_SNAKE_CASE_=3 ,SCREAMING_SNAKE_CASE_=None ,SCREAMING_SNAKE_CASE_=2 ,):
'''simple docstring'''
snake_case : List[str] = parent
snake_case : Any = batch_size
snake_case : List[str] = image_size
snake_case : str = patch_size
snake_case : List[Any] = num_channels
snake_case : List[str] = is_training
snake_case : Optional[Any] = use_labels
snake_case : List[str] = hidden_size
snake_case : Optional[Any] = num_hidden_layers
snake_case : List[Any] = num_attention_heads
snake_case : Union[str, Any] = intermediate_size
snake_case : Optional[int] = hidden_act
snake_case : Tuple = hidden_dropout_prob
snake_case : List[Any] = attention_probs_dropout_prob
snake_case : Optional[int] = type_sequence_label_size
snake_case : Dict = initializer_range
snake_case : int = scope
snake_case : Dict = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
snake_case : Union[str, Any] = (image_size // patch_size) ** 2
snake_case : List[str] = num_patches + 2
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case : str = None
if self.use_labels:
snake_case : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
snake_case : Dict = self.get_config()
return config, pixel_values, labels
def snake_case_ ( self ):
'''simple docstring'''
return DeiTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=SCREAMING_SNAKE_CASE_ ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : Dict = TFDeiTModel(config=SCREAMING_SNAKE_CASE_ )
snake_case : int = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : List[str] = TFDeiTForMaskedImageModeling(config=SCREAMING_SNAKE_CASE_ )
snake_case : Tuple = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
snake_case : Union[str, Any] = 1
snake_case : Union[str, Any] = TFDeiTForMaskedImageModeling(SCREAMING_SNAKE_CASE_ )
snake_case : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case : Union[str, Any] = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : Any = self.type_sequence_label_size
snake_case : Optional[Any] = TFDeiTForImageClassification(SCREAMING_SNAKE_CASE_ )
snake_case : Any = model(SCREAMING_SNAKE_CASE_ ,labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case : str = 1
snake_case : Optional[Any] = TFDeiTForImageClassification(SCREAMING_SNAKE_CASE_ )
snake_case : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case : Optional[int] = model(SCREAMING_SNAKE_CASE_ ,labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def snake_case_ ( self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class TFDeiTModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            '''feature-extraction''': TFDeiTModel,
            '''image-classification''': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Dict = TFDeiTModelTester(self )
snake_case : Optional[Any] = ConfigTester(self ,config_class=SCREAMING_SNAKE_CASE_ ,has_text_modality=SCREAMING_SNAKE_CASE_ ,hidden_size=37 )
def snake_case_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def snake_case_ ( self ):
'''simple docstring'''
pass
def snake_case_ ( self ):
'''simple docstring'''
snake_case , snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : Optional[int] = model_class(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer) )
snake_case : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ ,tf.keras.layers.Dense ) )
def snake_case_ ( self ):
'''simple docstring'''
snake_case , snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : int = model_class(SCREAMING_SNAKE_CASE_ )
snake_case : int = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case : List[str] = [*signature.parameters.keys()]
snake_case : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=False ):
'''simple docstring'''
snake_case : Any = super()._prepare_for_class(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,return_labels=SCREAMING_SNAKE_CASE_ )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def snake_case_ ( self ):
'''simple docstring'''
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case : Optional[int] = TFDeiTModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def lowercase ( ) -> List[Any]:
'''simple docstring'''
snake_case : Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class _A ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case_ ( self ):
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def snake_case_ ( self ):
'''simple docstring'''
snake_case : str = TFDeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
snake_case : Optional[Any] = self.default_image_processor
snake_case : Dict = prepare_img()
snake_case : int = image_processor(images=SCREAMING_SNAKE_CASE_ ,return_tensors="""tf""" )
# forward pass
snake_case : Optional[int] = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
snake_case : str = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape ,SCREAMING_SNAKE_CASE_ )
snake_case : Dict = tf.constant([-1.02_66, 0.19_12, -1.28_61] )
self.assertTrue(np.allclose(outputs.logits[0, :3] ,SCREAMING_SNAKE_CASE_ ,atol=1E-4 ) )
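# Hedged sketch (added; assumes hub access and mirrors the slow integration test above):
#   processor = DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""")
#   model = TFDeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""")
#   logits = model(**processor(images=prepare_img(), return_tensors="""tf""")).logits  # shape (1, 1000)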
| 315 |
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)
STOPPING_CRITERIA_INPUTS_DOCSTRING = r'''
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
'''
class StoppingCriteria ( ABC ):
    '''Abstract base class for all stopping criteria that can be applied during generation.'''
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__( self , input_ids: torch.LongTensor , scores: torch.FloatTensor , **kwargs ) -> bool:
        raise NotImplementedError("""StoppingCriteria needs to be subclassed""" )
class MaxLengthCriteria ( StoppingCriteria ):
    '''Stops generation once the sequence length reaches `max_length`.'''
    def __init__( self , max_length: int , max_position_embeddings: Optional[int] = None ):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__( self , input_ids: torch.LongTensor , scores: torch.FloatTensor , **kwargs ) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
"""This is a friendly reminder - the current text generation call will exceed the model's predefined """
F"""maximum length ({self.max_position_embeddings}). Depending on the model, you may observe """
"""exceptions, performance degradation, or nothing at all.""" )
return is_done
class MaxNewTokensCriteria ( StoppingCriteria ):
    '''Deprecated - stops generation once `max_new_tokens` tokens have been generated.'''
    def __init__( self , start_length: int , max_new_tokens: int ):
        warnings.warn(
            """The class `MaxNewTokensCriteria` is deprecated. """
            F"""Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` """
            """with `max_length = start_length + max_new_tokens` instead.""" , FutureWarning , )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__( self , input_ids: torch.LongTensor , scores: torch.FloatTensor , **kwargs ) -> bool:
        return input_ids.shape[-1] >= self.max_length
class MaxTimeCriteria ( StoppingCriteria ):
    '''Stops generation once more than `max_time` seconds have elapsed.'''
    def __init__( self , max_time: float , initial_timestamp: Optional[float] = None ):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__( self , input_ids: torch.LongTensor , scores: torch.FloatTensor , **kwargs ) -> bool:
        return time.time() - self.initial_timestamp > self.max_time
class StoppingCriteriaList ( list ):
    '''A list of `StoppingCriteria`; generation stops as soon as any criterion fires.'''
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__( self , input_ids: torch.LongTensor , scores: torch.FloatTensor , **kwargs ) -> bool:
        return any(criteria(input_ids , scores ) for criteria in self )
    @property
    def max_length( self ) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium , MaxLengthCriteria ):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium , MaxNewTokensCriteria ):
                return stopping_criterium.max_length
        return None
def validate_stopping_criteria( stopping_criteria: StoppingCriteriaList , max_length: int ) -> StoppingCriteriaList:
    '''Warn if the criteria disagree with `max_length` and append a `MaxLengthCriteria` when none is set.'''
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria )
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("""You set different `max_length` for stopping criteria and `max_length` parameter""" , UserWarning )
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length ) )
    return new_stopping_criteria
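# Minimal usage sketch (added example, not part of the original module). With the classes
# above, generation stops as soon as any criterion in the list fires:
if __name__ == "__main__":
    criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=8 ), MaxTimeCriteria(max_time=60.0 )] )
    dummy_input_ids = torch.zeros((1, 8) , dtype=torch.long )
    dummy_scores = torch.zeros((1, 100) )
    print(criteria(dummy_input_ids , dummy_scores ) )  # True - the length limit is already reached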
| 315 | 1 |
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=A__ ):
"""simple docstring"""
_lowercase = ['transformers', 'torch', 'note_seq']
def __init__( self : Optional[int] , *lowerCamelCase__ : int , **lowerCamelCase__ : int ):
requires_backends(self , ["transformers", "torch", "note_seq"] )
@classmethod
def _UpperCamelCase( cls : str , *lowerCamelCase__ : Union[str, Any] , **lowerCamelCase__ : Optional[int] ):
requires_backends(cls , ["transformers", "torch", "note_seq"] )
@classmethod
def _UpperCamelCase( cls : str , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Any ):
requires_backends(cls , ["transformers", "torch", "note_seq"] )
| 37 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar( num_diffusion_timesteps: int , max_beta: float = 0.999 , alpha_transform_type: str = "cosine" , ) -> torch.Tensor:
    '''Create a beta schedule that discretizes the given alpha_bar function.'''
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(f"""Unsupported alpha_transform_type: {alpha_transform_type}""" )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
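# Added reference note: the loop above discretizes beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i),
# so the cumulative product of (1 - beta_i) tracks alpha_bar; the cosine transform
# alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2 follows Nichol & Dhariwal (2021).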
class HeunDiscreteScheduler ( SchedulerMixin , ConfigMixin ):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
@register_to_config
def __init__(self : Union[str, Any] , __a : int = 1000 , __a : float = 0.0_00_85 , __a : float = 0.0_12 , __a : str = "linear" , __a : Optional[Union[np.ndarray, List[float]]] = None , __a : str = "epsilon" , __a : Optional[bool] = False , __a : Optional[bool] = False , __a : float = 1.0 , __a : str = "linspace" , __a : int = 0 , ):
if trained_betas is not None:
UpperCAmelCase_ = torch.tensor(__a , dtype=torch.floataa )
elif beta_schedule == "linear":
UpperCAmelCase_ = torch.linspace(__a , __a , __a , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
UpperCAmelCase_ = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , __a , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
UpperCAmelCase_ = betas_for_alpha_bar(__a , alpha_transform_type="cosine" )
elif beta_schedule == "exp":
UpperCAmelCase_ = betas_for_alpha_bar(__a , alpha_transform_type="exp" )
else:
            raise NotImplementedError(f"""{beta_schedule} is not implemented for {self.__class__}""" )
UpperCAmelCase_ = 1.0 - self.betas
UpperCAmelCase_ = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(__a , __a , __a )
UpperCAmelCase_ = use_karras_sigmas
def _lowercase (self : Optional[Any] , __a : Union[str, Any] , __a : Tuple=None ):
if schedule_timesteps is None:
UpperCAmelCase_ = self.timesteps
UpperCAmelCase_ = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
UpperCAmelCase_ = 1 if len(__a ) > 1 else 0
else:
UpperCAmelCase_ = timestep.cpu().item() if torch.is_tensor(__a ) else timestep
UpperCAmelCase_ = self._index_counter[timestep_int]
return indices[pos].item()
@property
def _lowercase (self : List[Any] ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _lowercase (self : Optional[Any] , __a : torch.FloatTensor , __a : Union[float, torch.FloatTensor] , ):
UpperCAmelCase_ = self.index_for_timestep(__a )
UpperCAmelCase_ = self.sigmas[step_index]
UpperCAmelCase_ = sample / ((sigma**2 + 1) ** 0.5)
return sample
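        # Added note: dividing by sqrt(sigma**2 + 1) is the Karras-style input preconditioning -
        # since the noisy sample has variance sigma**2 + 1 (unit-variance data plus sigma-scaled
        # noise), this keeps the UNet input at roughly unit variance at every timestep.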
def _lowercase (self : Any , __a : int , __a : Union[str, torch.device] = None , __a : Optional[int] = None , ):
UpperCAmelCase_ = num_inference_steps
UpperCAmelCase_ = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
UpperCAmelCase_ = np.linspace(0 , num_train_timesteps - 1 , __a , dtype=__a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
UpperCAmelCase_ = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
UpperCAmelCase_ = (np.arange(0 , __a ) * step_ratio).round()[::-1].copy().astype(__a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
UpperCAmelCase_ = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
UpperCAmelCase_ = (np.arange(__a , 0 , -step_ratio )).round().copy().astype(__a )
timesteps -= 1
else:
raise ValueError(
f"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
UpperCAmelCase_ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
UpperCAmelCase_ = np.log(__a )
UpperCAmelCase_ = np.interp(__a , np.arange(0 , len(__a ) ) , __a )
if self.config.use_karras_sigmas:
UpperCAmelCase_ = self._convert_to_karras(in_sigmas=__a , num_inference_steps=self.num_inference_steps )
UpperCAmelCase_ = np.array([self._sigma_to_t(__a , __a ) for sigma in sigmas] )
UpperCAmelCase_ = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
UpperCAmelCase_ = torch.from_numpy(__a ).to(device=__a )
UpperCAmelCase_ = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
UpperCAmelCase_ = torch.from_numpy(__a )
UpperCAmelCase_ = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(__a ).startswith("mps" ):
# mps does not support float64
UpperCAmelCase_ = timesteps.to(__a , dtype=torch.floataa )
else:
UpperCAmelCase_ = timesteps.to(device=__a )
# empty dt and derivative
UpperCAmelCase_ = None
UpperCAmelCase_ = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
UpperCAmelCase_ = defaultdict(__a )
def _lowercase (self : int , __a : Optional[Any] , __a : List[str] ):
# get log sigma
UpperCAmelCase_ = np.log(__a )
# get distribution
UpperCAmelCase_ = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
UpperCAmelCase_ = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
UpperCAmelCase_ = low_idx + 1
UpperCAmelCase_ = log_sigmas[low_idx]
UpperCAmelCase_ = log_sigmas[high_idx]
# interpolate sigmas
UpperCAmelCase_ = (low - log_sigma) / (low - high)
UpperCAmelCase_ = np.clip(__a , 0 , 1 )
# transform interpolation to time range
UpperCAmelCase_ = (1 - w) * low_idx + w * high_idx
UpperCAmelCase_ = t.reshape(sigma.shape )
return t
def _lowercase (self : Dict , __a : torch.FloatTensor , __a : Optional[int] ):
UpperCAmelCase_ = in_sigmas[-1].item()
UpperCAmelCase_ = in_sigmas[0].item()
UpperCAmelCase_ = 7.0 # 7.0 is the value used in the paper
UpperCAmelCase_ = np.linspace(0 , 1 , __a )
UpperCAmelCase_ = sigma_min ** (1 / rho)
UpperCAmelCase_ = sigma_max ** (1 / rho)
UpperCAmelCase_ = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
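        # Added note: this ramp is the Karras et al. (2022) schedule,
        # sigma_i = (sigma_max**(1/rho) + i/(n-1) * (sigma_min**(1/rho) - sigma_max**(1/rho)))**rho
        # with rho = 7, which clusters the noise levels more densely near sigma_min.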
@property
def _lowercase (self : List[str] ):
return self.dt is None
def _lowercase (self : List[Any] , __a : Union[torch.FloatTensor, np.ndarray] , __a : Union[float, torch.FloatTensor] , __a : Union[torch.FloatTensor, np.ndarray] , __a : bool = True , ):
UpperCAmelCase_ = self.index_for_timestep(__a )
# advance index counter by 1
UpperCAmelCase_ = timestep.cpu().item() if torch.is_tensor(__a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
UpperCAmelCase_ = self.sigmas[step_index]
UpperCAmelCase_ = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
UpperCAmelCase_ = self.sigmas[step_index - 1]
UpperCAmelCase_ = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
UpperCAmelCase_ = 0
UpperCAmelCase_ = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
UpperCAmelCase_ = sigma_hat if self.state_in_first_order else sigma_next
UpperCAmelCase_ = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
UpperCAmelCase_ = sigma_hat if self.state_in_first_order else sigma_next
UpperCAmelCase_ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
UpperCAmelCase_ = model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.config.clip_sample:
UpperCAmelCase_ = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
UpperCAmelCase_ = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
UpperCAmelCase_ = sigma_next - sigma_hat
# store for 2nd order step
UpperCAmelCase_ = derivative
UpperCAmelCase_ = dt
UpperCAmelCase_ = sample
else:
# 2. 2nd order / Heun's method
UpperCAmelCase_ = (sample - pred_original_sample) / sigma_next
UpperCAmelCase_ = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
UpperCAmelCase_ = self.dt
UpperCAmelCase_ = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__a )
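        # Added recap of the two-phase update: the first call per timestep takes a plain Euler
        # step and caches (derivative, dt, sample); the second call re-evaluates the slope at the
        # predicted point and applies the average of both slopes over the same dt (Heun, 2nd order).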
def _lowercase (self : Any , __a : torch.FloatTensor , __a : torch.FloatTensor , __a : torch.FloatTensor , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
UpperCAmelCase_ = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(__a ):
# mps does not support float64
UpperCAmelCase_ = self.timesteps.to(original_samples.device , dtype=torch.floataa )
UpperCAmelCase_ = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
UpperCAmelCase_ = self.timesteps.to(original_samples.device )
UpperCAmelCase_ = timesteps.to(original_samples.device )
UpperCAmelCase_ = [self.index_for_timestep(__a , __a ) for t in timesteps]
UpperCAmelCase_ = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
UpperCAmelCase_ = sigma.unsqueeze(-1 )
UpperCAmelCase_ = original_samples + noise * sigma
return noisy_samples
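        # Added note: this is the forward ("variance exploding") noising in sigma space,
        # x_t = x_0 + sigma_t * eps, consistent with the Karras-style sigmas used above.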
def __len__(self : str ):
return self.config.num_train_timesteps
| 78 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests ( PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
[
"""num_inference_steps""",
"""num_waveforms_per_prompt""",
"""generator""",
"""latents""",
"""output_type""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
__UpperCAmelCase : Optional[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=(32, 64) , class_embed_type="""simple_projection""" , projection_class_embeddings_input_dim=32 , class_embeddings_concat=_lowercase , )
__UpperCAmelCase : List[str] = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=_lowercase , set_alpha_to_one=_lowercase , )
torch.manual_seed(0 )
__UpperCAmelCase : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
__UpperCAmelCase : List[str] = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , projection_dim=32 , )
__UpperCAmelCase : Dict = ClapTextModelWithProjection(_lowercase )
__UpperCAmelCase : Optional[int] = RobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-roberta""" , model_max_length=77 )
__UpperCAmelCase : Union[str, Any] = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=16_000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=_lowercase , )
__UpperCAmelCase : str = SpeechTaHifiGan(_lowercase )
__UpperCAmelCase : Tuple = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""vocoder""": vocoder,
}
return components
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any]=0 ):
'''simple docstring'''
if str(_lowercase ).startswith("""mps""" ):
__UpperCAmelCase : List[Any] = torch.manual_seed(_lowercase )
else:
__UpperCAmelCase : List[Any] = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
__UpperCAmelCase : Union[str, Any] = {
"""prompt""": """A hammer hitting a wooden surface""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
}
return inputs
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
__UpperCAmelCase : Tuple = """cpu""" # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase : Dict = self.get_dummy_components()
__UpperCAmelCase : str = AudioLDMPipeline(**_lowercase )
__UpperCAmelCase : List[str] = audioldm_pipe.to(_lowercase )
audioldm_pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase : Optional[Any] = self.get_dummy_inputs(_lowercase )
__UpperCAmelCase : List[Any] = audioldm_pipe(**_lowercase )
__UpperCAmelCase : Union[str, Any] = output.audios[0]
assert audio.ndim == 1
assert len(_lowercase ) == 256
__UpperCAmelCase : int = audio[:10]
__UpperCAmelCase : Union[str, Any] = np.array(
[-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : Any = self.get_dummy_components()
__UpperCAmelCase : Optional[Any] = AudioLDMPipeline(**_lowercase )
__UpperCAmelCase : int = audioldm_pipe.to(_lowercase )
__UpperCAmelCase : Any = audioldm_pipe.to(_lowercase )
audioldm_pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase : List[Any] = self.get_dummy_inputs(_lowercase )
__UpperCAmelCase : Any = 3 * [inputs["""prompt"""]]
# forward
__UpperCAmelCase : int = audioldm_pipe(**_lowercase )
__UpperCAmelCase : List[str] = output.audios[0]
__UpperCAmelCase : Optional[Any] = self.get_dummy_inputs(_lowercase )
__UpperCAmelCase : str = 3 * [inputs.pop("""prompt""" )]
__UpperCAmelCase : Dict = audioldm_pipe.tokenizer(
_lowercase , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_lowercase , return_tensors="""pt""" , )
__UpperCAmelCase : Tuple = text_inputs["""input_ids"""].to(_lowercase )
__UpperCAmelCase : int = audioldm_pipe.text_encoder(
_lowercase , )
__UpperCAmelCase : Optional[int] = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
__UpperCAmelCase : List[str] = F.normalize(_lowercase , dim=-1 )
__UpperCAmelCase : List[Any] = prompt_embeds
# forward
__UpperCAmelCase : Any = audioldm_pipe(**_lowercase )
__UpperCAmelCase : Optional[int] = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
__UpperCAmelCase : Any = self.get_dummy_components()
__UpperCAmelCase : List[str] = AudioLDMPipeline(**_lowercase )
__UpperCAmelCase : Union[str, Any] = audioldm_pipe.to(_lowercase )
__UpperCAmelCase : Dict = audioldm_pipe.to(_lowercase )
audioldm_pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase : Dict = self.get_dummy_inputs(_lowercase )
__UpperCAmelCase : str = 3 * ["""this is a negative prompt"""]
__UpperCAmelCase : Optional[int] = negative_prompt
__UpperCAmelCase : Optional[Any] = 3 * [inputs["""prompt"""]]
# forward
__UpperCAmelCase : List[Any] = audioldm_pipe(**_lowercase )
__UpperCAmelCase : List[Any] = output.audios[0]
__UpperCAmelCase : Tuple = self.get_dummy_inputs(_lowercase )
__UpperCAmelCase : Tuple = 3 * [inputs.pop("""prompt""" )]
__UpperCAmelCase : Union[str, Any] = []
for p in [prompt, negative_prompt]:
__UpperCAmelCase : Optional[Any] = audioldm_pipe.tokenizer(
_lowercase , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_lowercase , return_tensors="""pt""" , )
__UpperCAmelCase : Tuple = text_inputs["""input_ids"""].to(_lowercase )
__UpperCAmelCase : Dict = audioldm_pipe.text_encoder(
_lowercase , )
__UpperCAmelCase : Tuple = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
__UpperCAmelCase : Any = F.normalize(_lowercase , dim=-1 )
embeds.append(_lowercase )
__UpperCAmelCase ,__UpperCAmelCase : List[Any] = embeds
# forward
__UpperCAmelCase : Dict = audioldm_pipe(**_lowercase )
__UpperCAmelCase : Optional[int] = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase : List[str] = self.get_dummy_components()
__UpperCAmelCase : Dict = PNDMScheduler(skip_prk_steps=_lowercase )
__UpperCAmelCase : int = AudioLDMPipeline(**_lowercase )
__UpperCAmelCase : Tuple = audioldm_pipe.to(_lowercase )
audioldm_pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase : List[Any] = self.get_dummy_inputs(_lowercase )
__UpperCAmelCase : str = """egg cracking"""
__UpperCAmelCase : Dict = audioldm_pipe(**_lowercase , negative_prompt=_lowercase )
__UpperCAmelCase : str = output.audios[0]
assert audio.ndim == 1
assert len(_lowercase ) == 256
__UpperCAmelCase : List[str] = audio[:10]
__UpperCAmelCase : Union[str, Any] = np.array(
[-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
__UpperCAmelCase : int = """cpu""" # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase : Dict = self.get_dummy_components()
__UpperCAmelCase : Optional[int] = PNDMScheduler(skip_prk_steps=_lowercase )
__UpperCAmelCase : List[Any] = AudioLDMPipeline(**_lowercase )
__UpperCAmelCase : Union[str, Any] = audioldm_pipe.to(_lowercase )
audioldm_pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase : List[Any] = """A hammer hitting a wooden surface"""
# test num_waveforms_per_prompt=1 (default)
__UpperCAmelCase : Optional[Any] = audioldm_pipe(_lowercase , num_inference_steps=2 ).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
__UpperCAmelCase : Union[str, Any] = 2
__UpperCAmelCase : Optional[Any] = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
__UpperCAmelCase : str = 2
__UpperCAmelCase : Optional[int] = audioldm_pipe(_lowercase , num_inference_steps=2 , num_waveforms_per_prompt=_lowercase ).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
__UpperCAmelCase : List[str] = 2
__UpperCAmelCase : Union[str, Any] = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=_lowercase ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase : List[Any] = self.get_dummy_components()
__UpperCAmelCase : List[str] = AudioLDMPipeline(**_lowercase )
__UpperCAmelCase : List[Any] = audioldm_pipe.to(_lowercase )
audioldm_pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase : List[str] = audioldm_pipe.vocoder.config.sampling_rate
__UpperCAmelCase : List[Any] = self.get_dummy_inputs(_lowercase )
__UpperCAmelCase : Optional[int] = audioldm_pipe(audio_length_in_s=0.016 , **_lowercase )
__UpperCAmelCase : Optional[int] = output.audios[0]
assert audio.ndim == 1
assert len(_lowercase ) / vocoder_sampling_rate == 0.016
__UpperCAmelCase : Optional[Any] = audioldm_pipe(audio_length_in_s=0.032 , **_lowercase )
__UpperCAmelCase : Dict = output.audios[0]
assert audio.ndim == 1
assert len(_lowercase ) / vocoder_sampling_rate == 0.032
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
__UpperCAmelCase : Tuple = self.get_dummy_components()
__UpperCAmelCase : Optional[int] = AudioLDMPipeline(**_lowercase )
__UpperCAmelCase : Dict = audioldm_pipe.to(_lowercase )
audioldm_pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase : Tuple = ["""hey"""]
__UpperCAmelCase : List[str] = audioldm_pipe(_lowercase , num_inference_steps=1 )
__UpperCAmelCase : Optional[Any] = output.audios.shape
assert audio_shape == (1, 256)
__UpperCAmelCase : Optional[int] = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
__UpperCAmelCase : Optional[Any] = SpeechTaHifiGan(_lowercase ).to(_lowercase )
__UpperCAmelCase : int = audioldm_pipe(_lowercase , num_inference_steps=1 )
__UpperCAmelCase : Optional[int] = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_lowercase )
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
self._test_inference_batch_single_identical(test_mean_pixel_difference=_lowercase )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_lowercase )
@slow
class AudioLDMPipelineSlowTests ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self : Tuple , UpperCamelCase : str , UpperCamelCase : Dict="cpu" , UpperCamelCase : Any=torch.floataa , UpperCamelCase : Any=0 ):
'''simple docstring'''
__UpperCAmelCase : str = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
__UpperCAmelCase : Tuple = np.random.RandomState(_lowercase ).standard_normal((1, 8, 128, 16) )
__UpperCAmelCase : Tuple = torch.from_numpy(_lowercase ).to(device=_lowercase , dtype=_lowercase )
__UpperCAmelCase : Dict = {
"""prompt""": """A hammer hitting a wooden surface""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 2.5,
}
return inputs
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : List[str] = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
__UpperCAmelCase : int = audioldm_pipe.to(_lowercase )
audioldm_pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase : Optional[int] = self.get_inputs(_lowercase )
__UpperCAmelCase : List[str] = 25
__UpperCAmelCase : Optional[Any] = audioldm_pipe(**_lowercase ).audios[0]
assert audio.ndim == 1
assert len(_lowercase ) == 81_920
__UpperCAmelCase : int = audio[77_230:77_240]
__UpperCAmelCase : Tuple = np.array(
[-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
__UpperCAmelCase : Tuple = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1e-2
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
__UpperCAmelCase : Tuple = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
__UpperCAmelCase : Union[str, Any] = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
__UpperCAmelCase : List[Any] = audioldm_pipe.to(_lowercase )
audioldm_pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase : Union[str, Any] = self.get_inputs(_lowercase )
__UpperCAmelCase : int = audioldm_pipe(**_lowercase ).audios[0]
assert audio.ndim == 1
assert len(_lowercase ) == 81_920
__UpperCAmelCase : List[Any] = audio[27_780:27_790]
__UpperCAmelCase : Union[str, Any] = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
__UpperCAmelCase : Any = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3e-2
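# Hedged sketch (added; mirrors the slow tests above and assumes hub access):
#   pipe = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""")
#   audio = pipe("""A hammer hitting a wooden surface""", num_inference_steps=25).audios[0]
#   # `audio` is a 1-D waveform sampled at pipe.vocoder.config.sampling_rate (16 kHz here).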
| 703 |
"""simple docstring"""
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester :
"""simple docstring"""
def __init__( self : Optional[Any] , UpperCamelCase : Tuple , UpperCamelCase : Dict=3 , UpperCamelCase : Dict=32 , UpperCamelCase : int=3 , UpperCamelCase : Optional[int]=10 , UpperCamelCase : Any=[8, 16, 32, 64] , UpperCamelCase : Optional[int]=[1, 1, 2, 1] , UpperCamelCase : List[str]=True , UpperCamelCase : Tuple=True , UpperCamelCase : Optional[Any]="relu" , UpperCamelCase : Optional[Any]=3 , UpperCamelCase : Union[str, Any]=None , UpperCamelCase : Optional[int]=["stage2", "stage3", "stage4"] , UpperCamelCase : Optional[int]=[2, 3, 4] , UpperCamelCase : Any=1 , ):
'''simple docstring'''
__UpperCAmelCase : Tuple = parent
__UpperCAmelCase : int = batch_size
__UpperCAmelCase : int = image_size
__UpperCAmelCase : str = num_channels
__UpperCAmelCase : int = embeddings_size
__UpperCAmelCase : Dict = hidden_sizes
__UpperCAmelCase : List[Any] = depths
__UpperCAmelCase : Optional[int] = is_training
__UpperCAmelCase : List[Any] = use_labels
__UpperCAmelCase : Optional[Any] = hidden_act
__UpperCAmelCase : int = num_labels
__UpperCAmelCase : Dict = scope
__UpperCAmelCase : Dict = len(UpperCamelCase )
__UpperCAmelCase : Tuple = out_features
__UpperCAmelCase : str = out_indices
__UpperCAmelCase : Optional[int] = num_groups
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase : Tuple = None
if self.use_labels:
__UpperCAmelCase : Any = ids_tensor([self.batch_size] , self.num_labels )
__UpperCAmelCase : List[Any] = self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : int , UpperCamelCase : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = BitModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
__UpperCAmelCase : str = model(UpperCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCamelCase__ ( self : int , UpperCamelCase : List[str] , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : Tuple = self.num_labels
__UpperCAmelCase : Tuple = BitForImageClassification(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
__UpperCAmelCase : List[Any] = model(UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self : int , UpperCamelCase : Optional[Any] , UpperCamelCase : Dict , UpperCamelCase : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = BitBackbone(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
__UpperCAmelCase : List[Any] = model(UpperCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__UpperCAmelCase : Union[str, Any] = None
__UpperCAmelCase : Optional[Any] = BitBackbone(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
__UpperCAmelCase : List[str] = model(UpperCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class BitModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": BitModel, """image-classification""": BitForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
__UpperCAmelCase : Any = BitModelTester(self )
__UpperCAmelCase : Tuple = ConfigTester(self , config_class=UpperCamelCase , has_text_modality=UpperCamelCase )
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
return
@unittest.skip(reason="""Bit does not output attentions""" )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Bit does not use inputs_embeds""" )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Bit does not support input and output embeddings""" )
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Optional[int] = model_class(UpperCamelCase )
__UpperCAmelCase : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : Optional[Any] = [*signature.parameters.keys()]
__UpperCAmelCase : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase )
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCamelCase )
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase ,__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Tuple = model_class(config=UpperCamelCase )
for name, module in model.named_modules():
                if isinstance(UpperCamelCase , (nn.BatchNorm2d, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
def check_hidden_states_output(UpperCamelCase : Any , UpperCamelCase : int , UpperCamelCase : str ):
__UpperCAmelCase : List[str] = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
__UpperCAmelCase : Dict = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
__UpperCAmelCase : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__UpperCAmelCase : Tuple = self.model_tester.num_stages
self.assertEqual(len(UpperCamelCase ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__UpperCAmelCase ,__UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Optional[int] = ["""preactivation""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
__UpperCAmelCase : List[Any] = layer_type
__UpperCAmelCase : List[str] = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : Optional[Any] = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
@unittest.skip(reason="""Bit does not use feedforward chunking""" )
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase )
@slow
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Optional[Any] = BitModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
def lowerCamelCase ( ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(UpperCamelCase )
__UpperCAmelCase : Optional[Any] = self.default_image_processor
__UpperCAmelCase : List[str] = prepare_img()
__UpperCAmelCase : Tuple = image_processor(images=UpperCamelCase , return_tensors="""pt""" ).to(UpperCamelCase )
# forward pass
with torch.no_grad():
__UpperCAmelCase : Dict = model(**UpperCamelCase )
# verify the logits
__UpperCAmelCase : int = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase )
__UpperCAmelCase : Union[str, Any] = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase , atol=1e-4 ) )
@require_torch
class BitBackboneTest ( BackboneTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig
    has_attentions = False
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : Any = BitModelTester(self )
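# Hedged sketch (added; mirrors the integration test above and assumes hub access):
#   processor = BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
#   model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
#   logits = model(**processor(images=prepare_img(), return_tensors="""pt""")).logits  # shape (1, 1000)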
| 299 | 0 |
def solution( power: int = 1000 ) -> int:
    """Return the sum of the decimal digits of 2**power (Project Euler problem 16)."""
    n = 2**power
    r = 0
    while n:
        r , n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
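# Sanity check (added): solution(15) == 26, since 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.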
| 445 |
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput ( ModelOutput ):
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig ( XLMRobertaConfig ):
    def __init__( self , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , project_dim=512 , pooler_fn="cls" , learn_encoder=False , use_attention_mask=True , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class RobertaSeriesModelWithTransformation ( RobertaPreTrainedModel ):
    _keys_to_ignore_on_load_unexpected = [r'pooler', r'logit_scale']
    _keys_to_ignore_on_load_missing = [r'position_ids', r'predictions.decoder.bias']
    base_model_prefix = 'roberta'
    config_class = RobertaSeriesConfig
    def __init__( self , config ):
        super().__init__(config )
        self.roberta = XLMRobertaModel(config )
        self.transformation = nn.Linear(config.hidden_size , config.project_dim )
        self.has_pre_transformation = getattr(config , '''has_pre_transformation''' , False )
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size , config.project_dim )
            self.pre_LN = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
        self.post_init()
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , ):
a :Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
a :int = self.base_model(
input_ids=_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , position_ids=_lowerCamelCase , head_mask=_lowerCamelCase , inputs_embeds=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , encoder_attention_mask=_lowerCamelCase , output_attentions=_lowerCamelCase , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=_lowerCamelCase , )
if self.has_pre_transformation:
a :Optional[int] = outputs['''hidden_states'''][-2]
a :List[Any] = self.pre_LN(_lowerCamelCase )
a :Optional[Any] = self.transformation_pre(_lowerCamelCase )
return TransformationModelOutput(
projection_state=_lowerCamelCase , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
else:
a :List[str] = self.transformation(outputs.last_hidden_state )
return TransformationModelOutput(
projection_state=_lowerCamelCase , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
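
# A minimal smoke test for the classes above (editor's sketch; the tiny hyperparameters
# are illustrative assumptions, not values from the original file):
if __name__ == "__main__":
    config = RobertaSeriesConfig(
        vocab_size=100,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=64,
        project_dim=16,
    )
    model = RobertaSeriesModelWithTransformation(config).eval()
    input_ids = torch.randint(0, 100, (1, 8))
    with torch.no_grad():
        out = model(input_ids=input_ids, attention_mask=torch.ones_like(input_ids))
    # The projection head maps hidden_size (32) down to project_dim (16) per token.
    assert out.projection_state.shape == (1, 8, 16)
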
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__snake_case : int = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : str = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_swa import GPTSwaTokenizer
else:
import sys
__snake_case : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 703 |
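
# Editor's note (not part of the original file): because `sys.modules[__name__]` is
# replaced with a `_LazyModule`, `from ...gpt_sw3 import GPTSw3Tokenizer` only imports
# the sentencepiece-backed submodule on first attribute access.
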
"""simple docstring"""
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
__snake_case : Optional[Any] = logging.getLogger(__name__)
require_version('pytorch_lightning>=1.0.4')
__snake_case : List[Any] = {
'base': AutoModel,
'sequence-classification': AutoModelForSequenceClassification,
'question-answering': AutoModelForQuestionAnswering,
'pretraining': AutoModelForPreTraining,
'token-classification': AutoModelForTokenClassification,
'language-modeling': AutoModelWithLMHead,
'summarization': AutoModelForSeqaSeqLM,
'translation': AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
__snake_case : Dict = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
__snake_case : int = sorted(arg_to_scheduler.keys())
__snake_case : Optional[Any] = '{' + ', '.join(arg_to_scheduler_choices) + '}'
class A__ ( pl.LightningModule ):
'''simple docstring'''
def __init__( self: List[Any] , _SCREAMING_SNAKE_CASE: argparse.Namespace , _SCREAMING_SNAKE_CASE: Union[str, Any]=None , _SCREAMING_SNAKE_CASE: Union[str, Any]="base" , _SCREAMING_SNAKE_CASE: Optional[int]=None , _SCREAMING_SNAKE_CASE: Optional[Any]=None , _SCREAMING_SNAKE_CASE: Any=None , **_SCREAMING_SNAKE_CASE: int , ) -> int:
"""simple docstring"""
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = 0
__lowerCAmelCase : int = Path(self.hparams.output_dir)
__lowerCAmelCase : List[str] = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
__lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({"num_labels": num_labels} if num_labels is not None else {}) , cache_dir=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
else:
__lowerCAmelCase : PretrainedConfig = config
__lowerCAmelCase : Dict = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
if getattr(self.hparams , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE):
assert hasattr(self.config , _SCREAMING_SNAKE_CASE), F"""model config doesn't have a `{p}` attribute"""
setattr(self.config , _SCREAMING_SNAKE_CASE , getattr(self.hparams , _SCREAMING_SNAKE_CASE))
if tokenizer is None:
__lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=_SCREAMING_SNAKE_CASE , )
else:
__lowerCAmelCase : PreTrainedTokenizer = tokenizer
__lowerCAmelCase : int = MODEL_MODES[mode]
if model is None:
__lowerCAmelCase : Any = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool(".ckpt" in self.hparams.model_name_or_path) , config=self.config , cache_dir=_SCREAMING_SNAKE_CASE , )
else:
__lowerCAmelCase : Union[str, Any] = model
def _SCREAMING_SNAKE_CASE ( self: str , *_SCREAMING_SNAKE_CASE: Union[str, Any] , **_SCREAMING_SNAKE_CASE: Dict) -> str:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = self.model_type.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : Any = arg_to_scheduler[self.hparams.lr_scheduler]
__lowerCAmelCase : Tuple = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps())
__lowerCAmelCase : Any = {"scheduler": scheduler, "interval": "step", "frequency": 1}
return scheduler
def _SCREAMING_SNAKE_CASE ( self: str) -> int:
"""simple docstring"""
__lowerCAmelCase : Dict = self.model
__lowerCAmelCase : Tuple = ["bias", "LayerNorm.weight"]
__lowerCAmelCase : Optional[Any] = [
{
"params": [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
], # check this named paramters
"weight_decay": self.hparams.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
if self.hparams.adafactor:
__lowerCAmelCase : Dict = Adafactor(
_SCREAMING_SNAKE_CASE , lr=self.hparams.learning_rate , scale_parameter=_SCREAMING_SNAKE_CASE , relative_step=_SCREAMING_SNAKE_CASE)
else:
__lowerCAmelCase : Dict = AdamW(
_SCREAMING_SNAKE_CASE , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon)
__lowerCAmelCase : int = optimizer
__lowerCAmelCase : Optional[int] = self.get_lr_scheduler()
return [optimizer], [scheduler]
def _SCREAMING_SNAKE_CASE ( self: List[Any] , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Any) -> Union[str, Any]:
"""simple docstring"""
return self.validation_step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Optional[int] , _SCREAMING_SNAKE_CASE: Any) -> List[Any]:
"""simple docstring"""
return self.validation_end(_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: str) -> int:
"""simple docstring"""
__lowerCAmelCase : Tuple = max(1 , self.hparams.gpus) # TODO: consider num_tpu_cores
__lowerCAmelCase : Tuple = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def _SCREAMING_SNAKE_CASE ( self: Any , _SCREAMING_SNAKE_CASE: Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
if stage == "test":
__lowerCAmelCase : List[str] = len(self.test_dataloader().dataset)
else:
__lowerCAmelCase : Tuple = self.get_dataloader("train" , self.hparams.train_batch_size , shuffle=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Tuple = len(self.train_dataloader().dataset)
def _SCREAMING_SNAKE_CASE ( self: int , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: bool = False) -> int:
"""simple docstring"""
raise NotImplementedError("You must implement this for your task")
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> List[str]:
"""simple docstring"""
return self.train_loader
def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> Tuple:
"""simple docstring"""
return self.get_dataloader("dev" , self.hparams.eval_batch_size , shuffle=_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Dict) -> Optional[int]:
"""simple docstring"""
return self.get_dataloader("test" , self.hparams.eval_batch_size , shuffle=_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: int , _SCREAMING_SNAKE_CASE: Any) -> int:
"""simple docstring"""
return os.path.join(
self.hparams.data_dir , "cached_{}_{}_{}".format(
_SCREAMING_SNAKE_CASE , list(filter(_SCREAMING_SNAKE_CASE , self.hparams.model_name_or_path.split("/"))).pop() , str(self.hparams.max_seq_length) , ) , )
@pl.utilities.rank_zero_only
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Dict[str, Any]) -> None:
"""simple docstring"""
__lowerCAmelCase : Dict = self.output_dir.joinpath("best_tfmr")
__lowerCAmelCase : str = self.step_count
self.model.save_pretrained(_SCREAMING_SNAKE_CASE)
self.tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE)
@staticmethod
def _SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Dict) -> Optional[Any]:
"""simple docstring"""
parser.add_argument(
"--model_name_or_path" , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--config_name" , default="" , type=_SCREAMING_SNAKE_CASE , help="Pretrained config name or path if not the same as model_name")
parser.add_argument(
"--tokenizer_name" , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , help="Pretrained tokenizer name or path if not the same as model_name" , )
parser.add_argument(
"--cache_dir" , default=str(Path(_SCREAMING_SNAKE_CASE).parent / "test_run" / "cache") , type=_SCREAMING_SNAKE_CASE , help="Where do you want to store the pre-trained models downloaded from huggingface.co" , )
parser.add_argument(
"--encoder_layerdrop" , type=_SCREAMING_SNAKE_CASE , help="Encoder layer dropout probability (Optional). Goes into model.config" , )
parser.add_argument(
"--decoder_layerdrop" , type=_SCREAMING_SNAKE_CASE , help="Decoder layer dropout probability (Optional). Goes into model.config" , )
parser.add_argument(
"--dropout" , type=_SCREAMING_SNAKE_CASE , help="Dropout probability (Optional). Goes into model.config" , )
parser.add_argument(
"--attention_dropout" , type=_SCREAMING_SNAKE_CASE , help="Attention dropout probability (Optional). Goes into model.config" , )
parser.add_argument("--learning_rate" , default=5e-5 , type=_SCREAMING_SNAKE_CASE , help="The initial learning rate for Adam.")
parser.add_argument(
"--lr_scheduler" , default="linear" , choices=_SCREAMING_SNAKE_CASE , metavar=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , help="Learning rate scheduler" , )
parser.add_argument("--weight_decay" , default=0.0 , type=_SCREAMING_SNAKE_CASE , help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon" , default=1e-8 , type=_SCREAMING_SNAKE_CASE , help="Epsilon for Adam optimizer.")
parser.add_argument("--warmup_steps" , default=0 , type=_SCREAMING_SNAKE_CASE , help="Linear warmup over warmup_steps.")
parser.add_argument("--num_workers" , default=4 , type=_SCREAMING_SNAKE_CASE , help="kwarg passed to DataLoader")
parser.add_argument("--num_train_epochs" , dest="max_epochs" , default=3 , type=_SCREAMING_SNAKE_CASE)
parser.add_argument("--train_batch_size" , default=32 , type=_SCREAMING_SNAKE_CASE)
parser.add_argument("--eval_batch_size" , default=32 , type=_SCREAMING_SNAKE_CASE)
parser.add_argument("--adafactor" , action="store_true")
class A__ ( pl.Callback ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self: Any , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Tuple) -> Any:
"""simple docstring"""
if (
trainer.is_global_zero and trainer.global_rank == 0
): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class A__ ( pl.Callback ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self: List[str] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Tuple) -> Optional[int]:
"""simple docstring"""
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(_SCREAMING_SNAKE_CASE)
class A__ ( pl.Callback ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self: Dict , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: int) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : List[str] = trainer.lr_schedulers[0]["scheduler"]
__lowerCAmelCase : str = {F"""lr_group_{i}""": lr for i, lr in enumerate(lr_scheduler.get_lr())}
pl_module.logger.log_metrics(_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Any , _SCREAMING_SNAKE_CASE: pl.Trainer , _SCREAMING_SNAKE_CASE: pl.LightningModule) -> str:
"""simple docstring"""
rank_zero_info("***** Validation results *****")
__lowerCAmelCase : Tuple = trainer.callback_metrics
# Log results
for key in sorted(_SCREAMING_SNAKE_CASE):
if key not in ["log", "progress_bar"]:
rank_zero_info("{} = {}\n".format(_SCREAMING_SNAKE_CASE , str(metrics[key])))
def _SCREAMING_SNAKE_CASE ( self: Optional[int] , _SCREAMING_SNAKE_CASE: pl.Trainer , _SCREAMING_SNAKE_CASE: pl.LightningModule) -> List[Any]:
"""simple docstring"""
rank_zero_info("***** Test results *****")
__lowerCAmelCase : Optional[int] = trainer.callback_metrics
# Log and save results to file
__lowerCAmelCase : List[Any] = os.path.join(pl_module.hparams.output_dir , "test_results.txt")
with open(_SCREAMING_SNAKE_CASE , "w") as writer:
for key in sorted(_SCREAMING_SNAKE_CASE):
if key not in ["log", "progress_bar"]:
rank_zero_info("{} = {}\n".format(_SCREAMING_SNAKE_CASE , str(metrics[key])))
writer.write("{} = {}\n".format(_SCREAMING_SNAKE_CASE , str(metrics[key])))
def _lowercase ( __snake_case ,__snake_case ) -> None:
# To allow all pl args uncomment the following line
# parser = pl.Trainer.add_argparse_args(parser)
parser.add_argument(
"--output_dir" ,default=str(Path(__snake_case ).parent / "test_run" / "model_checkpoints" ) ,type=__snake_case ,help="The output directory where the model predictions and checkpoints will be written." ,)
parser.add_argument(
"--fp16" ,action="store_true" ,help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" ,)
parser.add_argument(
"--fp16_opt_level" ,type=__snake_case ,default="O2" ,help=(
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html"
) ,)
parser.add_argument("--n_tpu_cores" ,dest="tpu_cores" ,type=__snake_case )
parser.add_argument("--max_grad_norm" ,dest="gradient_clip_val" ,default=1.0 ,type=__snake_case ,help="Max gradient norm" )
parser.add_argument("--do_train" ,action="store_true" ,help="Whether to run training." )
parser.add_argument("--do_predict" ,action="store_true" ,help="Whether to run predictions on the test set." )
parser.add_argument(
"--gradient_accumulation_steps" ,dest="accumulate_grad_batches" ,type=__snake_case ,default=1 ,help="Number of updates steps to accumulate before performing a backward/update pass." ,)
parser.add_argument("--seed" ,type=__snake_case ,default=42 ,help="random seed for initialization" )
parser.add_argument(
"--data_dir" ,default=str(Path(__snake_case ).parent / "test_run" / "dummy-train-data" ) ,type=__snake_case ,help="The input data dir. Should contain the training files for the CoNLL-2003 NER task." ,)
def _lowercase ( __snake_case ,__snake_case ,__snake_case=None ,__snake_case=True ,__snake_case=[] ,__snake_case=None ,__snake_case=None ,**__snake_case ,) -> Tuple:
pl.seed_everything(args.seed )
# init model
__lowerCAmelCase : List[Any] = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=__snake_case )
# add custom checkpoints
if checkpoint_callback is None:
__lowerCAmelCase : Optional[Any] = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir ,prefix="checkpoint" ,monitor="val_loss" ,mode="min" ,save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(__snake_case )
if logging_callback is None:
__lowerCAmelCase : Optional[Any] = LoggingCallback()
__lowerCAmelCase : int = {}
if args.fpaa:
__lowerCAmelCase : Optional[int] = 16
if args.gpus > 1:
__lowerCAmelCase : int = "auto"
__lowerCAmelCase : List[Any] = "ddp"
__lowerCAmelCase : Optional[int] = args.accumulate_grad_batches
__lowerCAmelCase : int = None
__lowerCAmelCase : Any = "auto"
__lowerCAmelCase : Optional[Any] = pl.Trainer.from_argparse_args(
__snake_case ,weights_summary=__snake_case ,callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] ,logger=__snake_case ,val_check_interval=1 ,num_sanity_val_steps=2 ,**__snake_case ,)
if args.do_train:
trainer.fit(__snake_case )
else:
print("RAG modeling tests with new set functions successfuly executed!" )
return trainer | 615 | 0 |
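
# Hedged usage sketch (editor's addition, not part of the original file): a task-specific
# module subclasses BaseTransformer, implements get_dataloader, and is launched with
# generic_train:
#
#   class MyTaskModule(BaseTransformer):
#       def get_dataloader(self, type_path, batch_size, shuffle=False):
#           ...  # build a torch DataLoader for `type_path` in {"train", "dev", "test"}
#
#   parser = argparse.ArgumentParser()
#   add_generic_args(parser, os.getcwd())
#   BaseTransformer.add_model_specific_args(parser, os.getcwd())  # mutates parser in place
#   args = parser.parse_args()
#   generic_train(MyTaskModule(args), args)
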
import random
import timeit
from functools import wraps
from typing import Callable, Optional

from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_py3nvml_available, is_tf_available, logging
from .benchmark_utils import (
    Benchmark,
    Memory,
    MemorySummary,
    measure_peak_memory_cpu,
    start_memory_tracing,
    stop_memory_tracing,
)


if is_tf_available():
    import tensorflow as tf
    from tensorflow.python.framework.errors_impl import ResourceExhaustedError

    from .benchmark_args_tf import TensorFlowBenchmarkArguments

if is_py3nvml_available():
    import py3nvml.py3nvml as nvml

logger = logging.get_logger(__name__)


def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
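
# Editor's illustration (not part of the original module): in graph mode the wrapped
# function is compiled with tf.function, in eager mode it is returned unchanged, e.g.:
#
#   @run_with_tf_optimizations(do_eager_mode=False, use_xla=False)
#   def double(x):
#       return x * 2
#
#   double(random_input_ids(batch_size=1, sequence_length=4, vocab_size=10))
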
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__

    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(self, model_name: str, batch_size: int, sequence_length: int):
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(self, model_name: str, batch_size: int, sequence_length: int):
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")

        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)

    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward

        return _inference

    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train

        return _train

    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")

    def _measure_memory(self, func: Callable[[], None]):
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
import copy
import tempfile
import unittest

from transformers import M2M100Config, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import M2M100ForConditionalGeneration, M2M100Model, M2M100Tokenizer
    from transformers.models.m2m_100.modeling_m2m_100 import M2M100Decoder, M2M100Encoder


def prepare_m2m_100_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


class M2M100ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="relu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        inputs_dict = prepare_m2m_100_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def get_config(self):
        return M2M100Config(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            encoder_layerdrop=self.encoder_layerdrop,
            decoder_layerdrop=self.decoder_layerdrop,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = M2M100Model(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]
        head_mask = inputs_dict["head_mask"]

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))

    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = M2M100Model(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = M2M100Encoder.from_pretrained(tmpdirname).to(torch_device)

        encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
            0
        ]

        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = M2M100Decoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            input_ids=inputs_dict["decoder_input_ids"],
            attention_mask=inputs_dict["decoder_attention_mask"],
            encoder_hidden_states=encoder_last_hidden_state,
            encoder_attention_mask=inputs_dict["attention_mask"],
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)


@require_torch
class M2M100ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            M2M100Model,
            M2M100ForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (M2M100ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": M2M100ForConditionalGeneration,
            "feature-extraction": M2M100Model,
            "summarization": M2M100ForConditionalGeneration,
            "text2text-generation": M2M100ForConditionalGeneration,
            "translation": M2M100ForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = M2M100ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=M2M100Config)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in (M2M100Model, M2M100ForConditionalGeneration):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)

            with torch.no_grad():
                model(**inputs)[0]

    def test_generate_fp16(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        model = M2M100ForConditionalGeneration(config).eval().to(torch_device)
        if torch_device == "cuda":
            model.half()
        model.generate(input_ids, attention_mask=attention_mask)
        model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)


def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-4


@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class M2M100ModelIntegrationTests(unittest.TestCase):
    @cached_property
    def default_tokenizer(self):
        return M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")

    def test_inference_no_head(self):
        model = M2M100Model.from_pretrained("facebook/m2m100_418M").to(torch_device)
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_m2m_100_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, 1024))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)

        # change to intended input
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_m2m_100_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, model.config.vocab_size))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)
        tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")

        src_fr = [
            "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
            "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
            "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
            " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
            " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
        ]

        # The below article tests that we don't add any hypotheses outside of the top n_beams
        dct = tokenizer(src_fr, padding=True, return_tensors="pt")

        hypotheses_batch = model.generate(
            input_ids=dct["input_ids"].to(torch_device),
            attention_mask=dct["attention_mask"].to(torch_device),
            num_beams=5,
            forced_bos_token_id=tokenizer.get_lang_id("en"),
        )

        expected_en = [
            "The NSA case highlights the total absence of intelligence debate",
            "I think there are two levels of response from the French government.",
            "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
            " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
            " communications in France.",
        ]

        generated = tokenizer.batch_decode(
            hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True
        )
        assert generated == expected_en
import numpy


class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        # Input values provided for training the model.
        self.input_array = input_array

        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.

        # Random initial weights are assigned.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )

        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)

        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        """Propagates the input forward through the two hidden layers to the output."""
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )

        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        """Updates every weight matrix from the error of the previous iteration."""
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        """Runs feedforward and back propagation for the given number of iterations."""
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        """Predicts the output for the given input values using the trained network."""
        self.array = input_arr

        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    """Applies the sigmoid activation function."""
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    """Provides the derivative value of the sigmoid function."""
    return (value) * (1 - (value))
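
# Quick sanity checks (editor's addition): sigmoid(0) = 0.5, and since the derivative
# here is expressed in terms of the sigmoid output s as s * (1 - s),
# sigmoid_derivative(0.5) = 0.25.
assert abs(sigmoid(numpy.float64(0.0)) - 0.5) < 1e-9
assert abs(sigmoid_derivative(0.5) - 0.25) < 1e-9
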
def example() -> int:
    """Trains the network above on fixed input/output values, then predicts one sample."""
    # Input values.
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )

    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)

    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))


if __name__ == "__main__":
    example()
from datasets.utils.patching import _PatchedModuleObj, patch_submodule

from . import _test_patching


def test_patch_submodule():
    import os as original_os
    from os import path as original_path
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join

    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
        # Every way to access os.path.join must be patched, and the rest must stay untouched

        # check os.path.join
        assert isinstance(_test_patching.os, _PatchedModuleObj)
        assert isinstance(_test_patching.os.path, _PatchedModuleObj)
        assert _test_patching.os.path.join is mock

        # check path.join
        assert isinstance(_test_patching.path, _PatchedModuleObj)
        assert _test_patching.path.join is mock

        # check join
        assert _test_patching.join is mock

        # check that the other attributes are untouched
        assert _test_patching.os.rename is original_rename
        assert _test_patching.path.dirname is original_dirname
        assert _test_patching.os.path.dirname is original_dirname

        # Even renamed modules or objects must be patched

        # check renamed_os.path.join
        assert isinstance(_test_patching.renamed_os, _PatchedModuleObj)
        assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj)
        assert _test_patching.renamed_os.path.join is mock

        # check renamed_path.join
        assert isinstance(_test_patching.renamed_path, _PatchedModuleObj)
        assert _test_patching.renamed_path.join is mock

        # check renamed_join
        assert _test_patching.renamed_join is mock

        # check that the other attributes are untouched
        assert _test_patching.renamed_os.rename is original_rename
        assert _test_patching.renamed_path.dirname is original_dirname
        assert _test_patching.renamed_os.path.dirname is original_dirname

    # check that everything is back to normal when the patch is over

    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join


def test_patch_submodule_builtin():
    assert _test_patching.open is open

    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock

    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open


def test_patch_submodule_missing():
    # pandas.read_csv is not present in _test_patching
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass


def test_patch_submodule_missing_builtin():
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len


def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open


def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename


def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
'''simple docstring'''
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
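# Standalone usage sketch (never executed under pytest): TextDatasetReader can be
# driven directly; "data.txt" is a hypothetical file with one text sample per line.
if __name__ == "__main__":
    ds = TextDatasetReader("data.txt", cache_dir="./cache").read()
    print(ds.column_names)  # ["text"]
    print(ds.num_rows)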
| 379 |
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    """Benchmark code comparing the two bit-count functions on several inputs."""

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
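    # Sanity-check sketch: both counters agree with Python's built-in popcount.
    # Brian Kernighan's loop runs once per *set* bit because `n &= n - 1` clears
    # the lowest set bit; the modulo version runs once per bit position.
    for n in (0, 1, 25, 37, 58):
        assert get_set_bits_count_using_brian_kernighans_algorithm(n) == bin(n).count("1")
        assert get_set_bits_count_using_modulo_operator(n) == bin(n).count("1")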
| 345 | 0 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
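    # Example invocation (script name and training flags are placeholders, not
    # defined by this file); everything after the script path is forwarded to it:
    #   python xla_spawn.py --num_cores 8 my_training_script.py --learning_rate 3e-5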
| 711 |
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
EQUATORIAL_RADIUS = 6_378_137


def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
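    # Usage sketch (requires the sibling haversine_distance module; coordinates
    # are approximate): geodesic distance in metres between San Francisco and
    # New York City. The parametric latitudes correct for the Earth's flattening,
    # so this is more accurate than plain haversine on long north-south paths.
    print(lamberts_ellipsoidal_distance(37.774856, -122.424227, 40.713019, -74.012647))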
| 507 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class LMSDiscreteScheduler(metaclass=DummyObject):
    # Dummy placeholder: instantiating or loading raises unless torch and scipy are installed.
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
| 88 |
"""simple docstring"""
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Returns the prime numbers below max_number (sieve of Eratosthenes)."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    """Counts hybrid integers p**q * q**p (p, q distinct primes) not exceeding base**degree."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count
if __name__ == "__main__":
print(f'''{solution() = }''')
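    # Worked check of the log-space condition used by the two-pointer loop: a
    # prime pair (p, q) is counted when q*log2(p) + p*log2(q) <= 800800*log2(800800),
    # which is the base-2 logarithm of p**q * q**p.
    upper = 800800 * log2(800800)
    assert 3 * log2(2) + 2 * log2(3) <= upper  # (2, 3): 2**3 * 3**2 = 72 qualifies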
| 88 | 1 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor | 717 | """simple docstring"""
def odd_even_transposition(arr: list) -> list:
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    arr = list(range(1_0, 0, -1))
    print(f'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''') | 227 | 0 |
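# Quick sketch for the transposition sort above: the algorithm alternates
# compare-exchanges on even and odd index pairs, and n phases suffice for a list
# of n elements (hence the outer loop over arr_size). Duplicates are preserved:
assert odd_even_transposition([3, 1, 2, 1]) == [1, 1, 2, 3]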
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the protocol prefix (e.g. "s3://") from a dataset path, if present."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Whether `fs` points to a remote filesystem (anything but the local "file" protocol)."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
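# Usage sketch for the path helpers above:
#   extract_path_from_uri("s3://my-bucket/train") -> "my-bucket/train"
#   extract_path_from_uri("relative/path")        -> "relative/path"
# is_remote_filesystem returns True for any protocol other than the local "file" one.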
| 297 |
import argparse
import os
import re
PATH_TO_DIFFUSERS = """src/diffusers"""

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(R"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(R"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(R"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(R"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(R"""\[([^\]]+)\]""")


def get_indent(line):
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks


def ignore_underscore(key):
    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement):
    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return F'''[{imports}]'''
        keys = [part.strip().replace("\"", "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([F'''"{k}"''' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace("\"", "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([F'''"{k}"''' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement


def sort_imports(file, check_only=True):
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(F'''Overwriting {file}.''')
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(F'''Would overwrite {len(failures)} files, run `make style`.''')


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
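    # Typical invocations from the repository root (the utils/ path is an assumption):
    #   python utils/custom_init_isort.py --check_only   # fail if any __init__.py has unsorted imports
    #   python utils/custom_init_isort.py                # rewrite the files in place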
| 297 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 222 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}


class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape


class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union[PreTrainedTokenizerBase, FeatureExtractionMixin],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}


class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
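# Usage sketch: the defaults above mirror the base microsoft/biogpt architecture,
# and any field can be overridden at construction time, e.g.:
#
#   config = BioGptConfig(num_hidden_layers=12)
#   assert config.hidden_size == 1024 and config.num_hidden_layers == 12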
| 647 |
def pancake_sort(arr):
    cur = len(arr)
    while cur > 1:
        # Find the maximum number in arr
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(pancake_sort(unsorted))
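    # Quick sanity check: only prefix reversals ("flips") are used, first bringing
    # the current maximum to the front, then sinking it into position cur - 1.
    assert pancake_sort([3, 1, 4, 1, 5]) == [1, 1, 3, 4, 5]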
| 647 | 1 |
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel


def shave_segments(path, n_shave_prefix_segments=1):
    """Removes segments: positive values shave the first segments, negative shave the last."""
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])


def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")
        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")
        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping


def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")
        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping


def assign_to_checkpoint(
    paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None
):
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3

            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)

    for path in paths:
        new_path = path["new"]

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]


def convert_ldm_checkpoint(checkpoint, config):
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }

    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)

        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]

        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue

        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
        )

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths,
                new_checkpoint,
                checkpoint,
                additional_replacements=[meta_path],
                attention_paths_to_split=to_split,
                config=config,
            )

    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
    )

    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]

            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)

            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths,
                    new_checkpoint,
                    checkpoint,
                    additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None,
                    config=config,
                )
        else:
            resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])

                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
    )
    parser.add_argument(
        '--config_file',
        default=None,
        type=str,
        required=True,
        help='The config json file corresponding to the architecture.',
    )
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')

    args = parser.parse_args()

    checkpoint = torch.load(args.checkpoint_path)

    with open(args.config_file) as f:
        config = json.loads(f.read())

    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)

    if "ldm" in config:
        del config["ldm"]

    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)

    try:
        scheduler = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))
        vqvae = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))

        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
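    # Example invocation (script name and paths are placeholders):
    #   python convert_ldm_checkpoint.py --checkpoint_path model.ckpt \
    #       --config_file config.json --dump_path ./ldm-converted
    # The scheduler/VQ-VAE step is wrapped in try/except because older
    # checkpoints may not ship those components alongside the UNet weights.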
| 335 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 335 | 1 |
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 10_1122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 10_1122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 7_0307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__snake_case = {'''input_ids''': [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=__snake_case,  # the encoding dict defined above
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
| 24 |
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]

    for i in my_list:
        buckets[int(i - min_value)].append(i)

    return [v for bucket in buckets for v in sorted(bucket)]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
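    # The value range fixes the bucket count, so near-uniform input sorts in
    # roughly linear time; floats also work because bucket indices are truncated.
    assert bucket_sort([0.4, 0.1, 0.3]) == [0.1, 0.3, 0.4]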
| 412 | 0 |
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]

        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(F"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = F" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += F" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += F" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))


if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]

    format_json_to_md(input_json_file, output_md_file)
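    # Example invocation (file names are placeholders): reads aggregated benchmark
    # results from JSON and emits one collapsible markdown table per benchmark.
    #   python format.py benchmark_results.json benchmark_results.md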
| 712 |
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class __UpperCamelCase ( lowercase , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = ReformerTokenizer
SCREAMING_SNAKE_CASE__ = ReformerTokenizerFast
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = True
def __A ( self : Dict ):
'''simple docstring'''
super().setUp()
UpperCAmelCase_ = ReformerTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def __A ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ = "<s>"
UpperCAmelCase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase ) , lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase ) , lowerCAmelCase )
def __A ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(lowerCAmelCase ) , 1_000 )
def __A ( self : List[Any] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
def __A ( self : str ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = "I was born in 92000, and this is falsé."
UpperCAmelCase_ = tokenizer.tokenize(lowerCAmelCase )
UpperCAmelCase_ = rust_tokenizer.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
UpperCAmelCase_ = rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = tokenizer.encode(lowerCAmelCase )
UpperCAmelCase_ = rust_tokenizer.encode(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )
    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass
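Since this tokenizer ships without a padding token, asking for padding raises, which is what the padding test above asserts. The sketch below shows the failure and the usual workaround; it assumes network access for from_pretrained and is illustrative rather than part of the test suite:

from transformers import ReformerTokenizer

tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")

try:
    tok("some input", padding="max_length", max_length=15)
except ValueError as e:
    # transformers refuses to pad without a pad token defined
    print("padding failed:", e)

# Workaround: register a pad token first (a model consuming these ids would
# also need its embedding matrix resized to cover the new token).
tok.add_special_tokens({"pad_token": "<pad>"})
encoded = tok("some input", padding="max_length", max_length=15)
print(len(encoded["input_ids"]))  # 15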
    def test_full_tokenizer(self):
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
            ],
        )

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
            ],
        )
    @cached_property
    def big_tokenizer(self):
        return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import ReformerConfig, ReformerModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt")
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt")

        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["input_ids"].shape
        model = ReformerModel(config)

        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
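One note on the axial-position adjustment above: Reformer factorizes its position embeddings, and the (padded) sequence length has to equal the product of config.axial_pos_shape, which is why the test reuses the encoded input's shape. A tiny sketch of that constraint, with illustrative numbers:

import math

# Illustrative: a batch of shape (batch_size=1, seq_len=12), as encode_plus
# with return_tensors="pt" would produce for a single short sequence.
input_ids_shape = (1, 12)
axial_pos_shape = input_ids_shape  # the assignment made in the test above

# The factorization must tile the full sequence length exactly.
assert math.prod(axial_pos_shape) == input_ids_shape[-1]  # 1 * 12 == 12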
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # This tokenizer does not know some characters like ")".
        # That is the reason why we use very simple texts here.
        # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
            "This is a very simple sentence.",
            "The quick brown fox jumps over the lazy dog.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/reformer-crime-and-punishment",
            revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a",
            padding=False,
            sequences=sequences,
        ) | 268 | 0 |
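Finally, as the comments in the integration test note, characters absent from the SentencePiece vocabulary degrade to <unk>. A short sketch against the local fixture (SAMPLE_VOCAB from the top of this file; id 0 is the unknown token there, per the assertions in test_full_tokenizer):

from transformers import ReformerTokenizer

tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)

ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("falsé."))
print(0 in ids)  # True: "é" is out-of-vocabulary and maps to <unk> (id 0)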