| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (86 to 54.5k chars) | int64 (0 to 371) | string (87 to 49.2k chars) | int64 (0 to 349) | int64 (0 or 1) |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, t, x, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
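# A minimal sampling-loop sketch for the scheduler above. `score_model` is a
# hypothetical placeholder for a trained score network; only `set_timesteps`,
# `step_pred`, and the config fields come from this file.
def score_model(x, t):
    return torch.zeros_like(x)  # placeholder; a real model predicts the score here

scheduler = ScoreSdeVpScheduler(num_train_timesteps=2000)
scheduler.set_timesteps(num_inference_steps=100)

x = torch.randn(1, 3, 32, 32)  # start from pure noise
for t in scheduler.timesteps:
    score = score_model(x, t)
    x, x_mean = scheduler.step_pred(score, t, x)
# x_mean holds the final estimate without the last noise injection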
"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./  # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
    "self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
    "self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "ctc_proj",
    "mask_emb": "masked_spec_embed",
}
# Name assumed: the original constant was obfuscated; sibling conversion
# scripts in transformers call this list TOP_LEVEL_KEYS.
TOP_LEVEL_KEYS = [
    "ctc_proj",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)

    recursively_load_weights(model, hf_wavlm)

    hf_wavlm.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
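# A sketch of driving the converter from Python instead of the CLI. The paths
# below are placeholders, not real files.
convert_wavlm_checkpoint(
    checkpoint_path="WavLM-Base.pt",            # fairseq checkpoint (placeholder path)
    pytorch_dump_folder_path="wavlm-base-hf",   # output directory for the converted model
    config_path=None,                           # None falls back to a default WavLMConfig
)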
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mvp"] = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
    from .tokenization_mvp import MvpTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mvp_fast import MvpTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mvp import (
            MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
            MvpForCausalLM,
            MvpForConditionalGeneration,
            MvpForQuestionAnswering,
            MvpForSequenceClassification,
            MvpModel,
            MvpPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
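# What the lazy-import pattern above buys, as a small sketch: nothing under
# transformers.models.mvp is imported at package import time; _LazyModule
# resolves each attribute on first access (assumes transformers and torch are
# installed).
import transformers

model_cls = transformers.MvpModel  # triggers the import of modeling_mvp only now
print(model_cls.__module__)        # -> transformers.models.mvp.modeling_mvp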
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and attention mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip("LLaMA buffers include complex numbers, which breaks this test" )
def UpperCamelCase_ (self ):
"""simple docstring"""
pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13bf_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)

    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test"
    )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))

        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32
        )
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Model is currently gated")
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf", device_map="sequential", use_safetensors=False
        )

        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
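# The rope-scaling test above toggles the same `rope_scaling` dict a user would
# set on a real config; a sketch, assuming a transformers version whose
# LlamaConfig validates this argument at init time.
from transformers import LlamaConfig

config = LlamaConfig(rope_scaling={"type": "linear", "factor": 10.0})
print(config.rope_scaling)  # positions are interpolated 10x beyond the trained length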
'''simple docstring'''
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    """Compute real power (P = S * pf) from apparent power and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Compute reactive power (Q = S * sqrt(1 - pf^2)) from apparent power and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
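# Real and reactive power are the two legs of the power triangle, so they should
# recombine into the apparent power; a quick check using the functions above.
apparent, pf = 100.0, 0.8
p = real_power(apparent, pf)      # 80.0, since P = S * pf
q = reactive_power(apparent, pf)  # 60.0, since Q = S * sqrt(1 - 0.8**2) = 100 * 0.6
assert math.isclose(math.hypot(p, q), apparent)  # sqrt(80^2 + 60^2) == 100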
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
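# A minimal construction sketch for the config above, assuming transformers is
# installed so BertConfig can stand in for both sides.
from transformers import BertConfig

encoder_config = BertConfig()
decoder_config = BertConfig()
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
assert config.decoder.is_decoder and config.decoder.add_cross_attention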
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(self, vocab_size=32000, d_model=1024, n_layer=24, n_head=16, d_inner=4096, ff_activation="gelu", untie_r=True, attn_type="bi", initializer_range=0.02, layer_norm_eps=1e-12, dropout=0.1, mem_len=512, reuse_len=None, use_mems_eval=True, use_mems_train=False, bi_data=False, clamp_len=-1, same_length=False, summary_type="last", summary_use_proj=True, summary_activation="tanh", summary_last_dropout=0.1, start_n_top=5, end_n_top=5, pad_token_id=5, bos_token_id=1, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
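# The attribute_map above lets generic config names transparently alias the
# XLNet-specific ones; a small sketch using the class defined above.
config = XLNetConfig(d_model=768, n_head=12)
assert config.hidden_size == config.d_model == 768
assert config.num_attention_heads == config.n_head == 12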
'''simple docstring'''
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
__lowerCAmelCase : Optional[Any] ="\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
__lowerCAmelCase : Union[str, Any] ="\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
__lowerCAmelCase : str ="\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence: str, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though the Wiki-Auto and TURK datasets
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score


def compute_sacrebleu(predictions, references, smooth_method="exp", smooth_value=None, force=False, lowercase=False, use_effective_order=False):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool):
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.
    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate (the fairseq attribute names fc1/fc2 are reconstructed; the
        # dump had collapsed both to the same obfuscated name)
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.fc1.weight = from_adapter.fc1.weight
            to_adapter.fc1.bias = from_adapter.fc1.bias
            to_adapter.fc2.weight = from_adapter.fc2.weight
            to_adapter.fc2.bias = from_adapter.fc2.bias

        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
lowerCamelCase : List[Any] = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
) | 233 | def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> str:
    """Convert a decimal integer to its binary representation as a string."""
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False
    if num < 0:
        negative = True
        num = -num

    binary = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
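# A few sanity checks for the converter above, matching its error handling.
assert decimal_to_binary(0) == "0b0"
assert decimal_to_binary(5) == "0b101"
assert decimal_to_binary(-5) == "-0b101"
try:
    decimal_to_binary(1.5)
except TypeError as err:
    print(err)  # 'float' object cannot be interpreted as an integer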
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        activation_fn: str = "geglu",
        norm_elementwise_affine: bool = True,
        double_self_attention: bool = True,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.in_channels = in_channels
        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)
        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    double_self_attention=double_self_attention,
                    norm_elementwise_affine=norm_elementwise_affine,
                )
                for _ in range(num_layers)
            ]
        )
        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(
        self,
        hidden_states,
        encoder_hidden_states=None,
        timestep=None,
        class_labels=None,
        num_frames=1,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        # 1. Input: fold the frame axis out of the batch so attention runs over time.
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames
        residual = hidden_states
        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)
        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)
        hidden_states = self.proj_in(hidden_states)
        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
            )
        # 3. Output: undo the reshapes and add the residual connection.
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, num_frames, channel)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)
        output = hidden_states + residual
        if not return_dict:
            return (output,)
        return TransformerTemporalModelOutput(sample=output)
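# Minimal smoke test for the temporal transformer above (an added sketch; the
# shapes are illustrative and not taken from any particular pipeline):
def _temporal_transformer_demo() -> None:
    model = TransformerTemporalModel(num_attention_heads=2, attention_head_dim=8, in_channels=16, norm_num_groups=4)
    frames = torch.randn(2 * 4, 16, 8, 8)  # (batch * num_frames, channels, height, width)
    sample = model(frames, num_frames=4).sample
    assert sample.shape == frames.shape  # the residual connection preserves the layout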
| 82 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=["politics", "health"]
        )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self, classifier, examples):
        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics")
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # No kwarg
        outputs = classifier("Who are you voting for in 2020?", ["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier(
            "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}"
        )
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(1)
            ],
        )
        outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(2)
            ],
        )

        with self.assertRaises(ValueError):
            classifier("", candidate_labels="politics")
        with self.assertRaises(TypeError):
            classifier(None, candidate_labels="politics")
        with self.assertRaises(ValueError):
            classifier("Who are you voting for in 2020?", candidate_labels="")
        with self.assertRaises(TypeError):
            classifier("Who are you voting for in 2020?", candidate_labels=None)
        with self.assertRaises(ValueError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template="Not formatting template",
            )
        with self.assertRaises(AttributeError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template=None,
            )

        self.run_entailment_id(classifier)
    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
@require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
"""zero-shot-classification""", model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""", framework="""pt""", )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"""Who are you voting for in 2020?""" * 100, candidate_labels=["""politics""", """public health""", """science"""] )
@require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
"""zero-shot-classification""", model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""", framework="""pt""", )
        outputs = zero_shot_classifier(
"""Who are you voting for in 2020?""", candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
            nested_simplify(outputs ), {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""science""", """public health""", """politics"""],
"""scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3],
}, )
@require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
"""zero-shot-classification""", model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""", framework="""tf""", )
        outputs = zero_shot_classifier(
"""Who are you voting for in 2020?""", candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
            nested_simplify(outputs ), {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""science""", """public health""", """politics"""],
"""scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3],
}, )
@slow
@require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline("""zero-shot-classification""", model="""roberta-large-mnli""", framework="""pt""" )
        outputs = zero_shot_classifier(
"""Who are you voting for in 2020?""", candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
            nested_simplify(outputs ), {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9],
}, )
        outputs = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""", candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""], multi_label=SCREAMING_SNAKE_CASE_, )
self.assertEqual(
            nested_simplify(outputs ), {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
}, )
@slow
@require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline("""zero-shot-classification""", model="""roberta-large-mnli""", framework="""tf""" )
        outputs = zero_shot_classifier(
"""Who are you voting for in 2020?""", candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
            nested_simplify(outputs ), {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9],
}, )
        outputs = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""", candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""], multi_label=SCREAMING_SNAKE_CASE_, )
self.assertEqual(
            nested_simplify(outputs ), {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
}, )
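# Standalone usage sketch (an added example, not part of the test suite; the
# checkpoint download requires network access, and exact scores depend on the model):
def _zero_shot_demo() -> None:
    classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")
    result = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "science"])
    print(result["labels"][0], result["scores"][0])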
| 82 | 1 |
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Only check this for base model, not necessary for all model classes.
        # This will also help speed-up tests.
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
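# Quick sketch of exercising the tester outside pytest (an added example; it only
# builds the dummy config and inputs, no model weights are needed):
def _flax_bert_tester_demo() -> None:
    tester = FlaxBertModelTester(None)
    config, inputs = tester.prepare_config_and_inputs_for_common()
    assert inputs["input_ids"].shape == (tester.batch_size, tester.seq_length)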
| 169 |
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 169 | 1 |
"""simple docstring"""
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Return the great-circle distance in metres between two points on Earth,
    with latitudes corrected for the WGS-84 flattening."""
    # CONSTANTS per WGS-84 https://en.wikipedia.org/wiki/World_Geodetic_System
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
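# Added usage sketch (the coordinates are illustrative; the printed figure is
# approximate and depends on the flattening correction above):
def _haversine_demo() -> None:
    san_francisco = (37.774856, -122.424227)
    yosemite = (37.864742, -119.537521)
    print(f"{haversine_distance(*san_francisco, *yosemite):0,.0f} meters")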
if __name__ == "__main__":
import doctest
doctest.testmod() | 354 |
"""simple docstring"""
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("""Parameter n must be int or castable to int.""")
    if n <= 0:
        raise ValueError("""Parameter n must be greater than or equal to one.""")
    max_number = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            max_number = i  # i is prime here: all smaller factors were divided out
            n //= i
        i += 1
    if n > 1:
        max_number = n  # whatever remains after trial division is itself prime
    return int(max_number)
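# Added sanity checks (13195 = 5 * 7 * 13 * 29; a prime is its own largest factor):
def _largest_prime_factor_check() -> None:
    assert solution(13195) == 29
    assert solution(17) == 17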
if __name__ == "__main__":
print(F'{solution() = }') | 317 | 0 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequencial with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)
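# Typical invocation (an added note; the script name and paths are illustrative):
#   python convert_tf_gptsan_to_pt.py --tf_model_dir ./tf_checkpoint --output ./gptsan_japanese.pt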
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser(
description="""model converter.""", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("""--tf_model_dir""", metavar="""PATH""", type=str, required=True, help="""import model""")
parser.add_argument("""--output""", metavar="""PATH""", type=str, required=True, help="""output model""")
UpperCamelCase_ = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 309 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
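# Added usage sketch: build the default configuration and check a few fields.
def _levit_config_demo() -> None:
    config = LevitConfig()
    assert config.model_type == "levit"
    assert config.hidden_sizes == [128, 256, 384]
    assert config.down_ops[0][0] == "Subsample"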
| 84 | 0 |
def naive_pattern_search(s: str, pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
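    # Added example: overlapping matches are reported at every start index.
    assert naive_pattern_search('AAAA', 'AA') == [0, 1, 2]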
| 90 |
def solution(n: int = 1000) -> int:
    """Return the largest product a*b*c over Pythagorean triplets with a + b + c == n."""
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
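# Added check: for n = 12 the only triplet is (3, 4, 5), so the product is 60.
def _triplet_product_check() -> None:
    assert solution(12) == 60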
if __name__ == "__main__":
print(F"""{solution() = }""")
| 90 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
_lowercase : Optional[int] = r"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(_lowercase)
class RagConfig(PretrainedConfig):
    model_type = '''rag'''
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, is_encoder_decoder=is_encoder_decoder, prefix=prefix, vocab_size=vocab_size, **kwargs, )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("""question_encoder""")
        question_encoder_model_type = question_encoder_config.pop("""model_type""")
        decoder_config = kwargs.pop("""generator""")
        decoder_model_type = decoder_config.pop("""model_type""")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache
        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, """forced_eos_token_id""", None)

    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ):
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
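# Added usage sketch (the checkpoint names are illustrative, and fetching them
# needs network access):
def _rag_config_demo() -> None:
    from transformers import AutoConfig

    question_encoder = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
    generator = AutoConfig.from_pretrained("facebook/bart-large")
    config = RagConfig.from_question_encoder_generator_configs(question_encoder, generator, n_docs=5)
    assert config.model_type == "rag"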
| 239 | '''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase : Dict = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 239 | 1 |
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive") | 354 | '''simple docstring'''
def binary_and(a: int, b: int):
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
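# Added examples: both operands are zero-padded to equal width before AND-ing.
def _binary_and_examples() -> None:
    assert binary_and(25, 32) == "0b000000"
    assert binary_and(37, 50) == "0b100000"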
if __name__ == "__main__":
import doctest
doctest.testmod() | 17 | 0 |
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = '''.'''
if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, '''utils/documentation_tests.txt''')
    non_existent_paths = []
    all_paths = []
with open(doctest_file_path) as fp:
for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
        non_existent_paths = '''\n'''.join(non_existent_paths)
raise ValueError(F'''`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}''')
if all_paths != sorted(all_paths):
raise ValueError('''Files in `utils/documentation_tests.txt` are not in alphabetical order.''')
| 39 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = '''
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
'''
class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool('text-question-answering')
        self.tool.setup()
        self.remote_tool = load_tool('text-question-answering', remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, 'What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, 'What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question='What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question='What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')
| 183 | 0 |
'''simple docstring'''
def lowercase_ ( lowerCAmelCase__ : str , lowerCAmelCase__ : str = " " ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = []
__UpperCAmelCase : Dict = 0
for index, char in enumerate(lowerCAmelCase__ ):
if char == separator:
split_words.append(string[last_index:index] )
__UpperCAmelCase : Union[str, Any] = index + 1
elif index + 1 == len(lowerCAmelCase__ ):
split_words.append(string[last_index : index + 1] )
return split_words
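# Added examples; note that, unlike str.split, a trailing separator does not
# emit an empty final field:
def _split_examples() -> None:
    assert split("apple#banana#cherry", "#") == ["apple", "banana", "cherry"]
    assert split("a,b,", ",") == ["a", "b"]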
if __name__ == "__main__":
from doctest import testmod
testmod()
| 16 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
_UpperCamelCase = logging.get_logger(__name__)
def make_batched(videos):
    """Coerce a single frame, a single video, or a batch of videos into a list of videos."""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f'Could not make batched video from {videos}')
class VivitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"""shortest_edge""": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
        crop_size = get_size_dict(crop_size, param_name="""crop_size""")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["""shortest_edge"""], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["""height"""], size["""width"""])
        else:
            raise ValueError(f'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}')
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'Size must have \'height\' and \'width\' as keys. Got {size.keys()}')
        return center_crop(image, size=(size["""height"""], size["""width"""]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("""Size and resample must be specified if do_resize is True.""")
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""")
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""")
        if offset and not do_rescale:
            raise ValueError("""For offset, do_rescale must also be set to True.""")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="""crop_size""")

        if not valid_images(videos):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""")

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, offset=offset, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, )
                for img in video
            ]
            for video in videos
        ]

        data = {"""pixel_values""": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
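# Added usage sketch (the frame size is illustrative; with the default config the
# shortest edge is resized to 256 and a 224x224 center crop is taken):
def _vivit_processor_demo() -> None:
    processor = VivitImageProcessor()
    video = [np.random.randint(0, 256, (360, 480, 3), dtype=np.uint8) for _ in range(8)]
    batch = processor(video, return_tensors="np")
    assert batch["pixel_values"].shape == (1, 8, 3, 224, 224)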
| 16 | 1 |
"""simple docstring"""
import math
def is_prime(number: int) -> bool:
    '''Check primality in O(sqrt(n)) time.'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    '''Return the nth prime number (Project Euler problem 7).'''
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError('Parameter nth must be int or castable to int.') from None
    if nth <= 0:
        raise ValueError('Parameter nth must be greater than or equal to one.')
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
            num += 1
        else:
            num += 1
    return primes[len(primes) - 1]
if __name__ == "__main__":
print(f'''{solution() = }''')
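    # Illustrative sanity checks (an addition for demonstration, not part of
    # the original script): the first six primes are 2, 3, 5, 7, 11, 13.
    assert is_prime(13) and not is_prime(15)
    assert solution(6) == 13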
| 172 | """simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained('openai/clip-vit-base-patch32')
        self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / 'preprocessor_config.json'
            config_tmpfile = Path(tmpdirname) / 'config.json'
            json.dump(
                {'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'}, open(processor_tmpfile, 'w'), )
            json.dump({'model_type': 'clip'}, open(config_tmpfile, 'w'))
            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / 'preprocessor_config.json'
            config_tmpfile = Path(tmpdirname) / 'config.json'
            json.dump(
                {'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'}, open(processor_tmpfile, 'w'), )
            json.dump({'model_type': 'clip'}, open(config_tmpfile, 'w'))
            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()
            # Create a dummy config file with image_processor_type
            processor_tmpfile = Path(tmpdirname) / 'preprocessor_config.json'
            config_tmpfile = Path(tmpdirname) / 'config.json'
            json.dump(
                {'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'}, open(processor_tmpfile, 'w'), )
            json.dump({'model_type': 'clip'}, open(config_tmpfile, 'w'))
            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()
            config_dict.pop('image_processor_type')
            config = CLIPImageProcessor(**config_dict)
            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)
            config = AutoImageProcessor.from_pretrained(tmpdirname)
            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue('_processor_class' not in dict_as_saved)
            self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / 'preprocessor_config.json'
            json.dump(
                {'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'}, open(processor_tmpfile, 'w'), )
            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, 'clip-base is not a local folder and is not a valid model identifier'):
            _ = AutoImageProcessor.from_pretrained('clip-base')
    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision='aaaaaa')
    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.', ):
            _ = AutoImageProcessor.from_pretrained('hf-internal-testing/config-no-model')
    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor')
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                'hf-internal-testing/test_dynamic_image_processor', trust_remote_code=False)
        image_processor = AutoImageProcessor.from_pretrained(
            'hf-internal-testing/test_dynamic_image_processor', trust_remote_code=True)
        self.assertEqual(image_processor.__class__.__name__, 'NewImageProcessor')
        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, 'NewImageProcessor')
    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register('custom', CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)
            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / 'preprocessor_config.json'
                config_tmpfile = Path(tmpdirname) / 'config.json'
                json.dump(
                    {'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'}, open(processor_tmpfile, 'w'), )
                json.dump({'model_type': 'clip'}, open(config_tmpfile, 'w'))
                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)
            # Now that the config is registered, it can be used as any other config with the auto-API
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(tmp_dir)
                new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_image_processor, CustomImageProcessor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True
        try:
            AutoConfig.register('custom', CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor')
            self.assertEqual(image_processor.__class__.__name__, 'NewImageProcessor')
            self.assertTrue(image_processor.is_local)
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                'hf-internal-testing/test_dynamic_image_processor', trust_remote_code=False)
            self.assertEqual(image_processor.__class__.__name__, 'NewImageProcessor')
            self.assertTrue(image_processor.is_local)
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                'hf-internal-testing/test_dynamic_image_processor', trust_remote_code=True)
            self.assertEqual(image_processor.__class__.__name__, 'NewImageProcessor')
            self.assertTrue(not hasattr(image_processor, 'is_local'))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
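# Minimal registration sketch (an illustrative recap of the pattern exercised
# by the tests above; `saved_dir` is a hypothetical path):
#   AutoConfig.register("custom", CustomConfig)
#   AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
#   processor = AutoImageProcessor.from_pretrained(saved_dir)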
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 18, 2]
    use_conv_embed = True if 'large' in model_name or 'huge' in model_name else False
    use_post_layernorm = True if 'large' in model_name or 'huge' in model_name else False
    use_layerscale = True if 'large' in model_name or 'huge' in model_name else False
    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]
    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352
    # set label information
    repo_id = 'huggingface/label-files'
    if "large" in model_name or "huge" in model_name:
        filename = 'imagenet-22k-id2label.json'
    else:
        filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = FocalNetConfig(
        embed_dim=embed_dim, depths=depths, focal_levels=focal_levels, focal_windows=focal_windows, use_conv_embed=use_conv_embed, id2label=id2label, label2id=label2id, use_post_layernorm=use_post_layernorm, use_layerscale=use_layerscale, )
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection')
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm', 'embeddings.norm')
    if "layers" in name:
        name = 'encoder.' + name
    if "encoder.layers" in name:
        name = name.replace('encoder.layers', 'encoder.stages')
    if "downsample.proj" in name:
        name = name.replace('downsample.proj', 'downsample.projection')
    if "blocks" in name:
        name = name.replace('blocks', 'layers')
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace('modulation.f', 'modulation.projection_in')
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace('modulation.h', 'modulation.projection_context')
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace('modulation.proj', 'modulation.projection_out')
    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'
    if "head" in name:
        name = name.replace('head', 'classifier')
    else:
        name = 'focalnet.' + name
    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak model's weights to our FocalNet structure."""
    # fmt: off
    model_name_to_url = {
'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth',
'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth',
'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth',
'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth',
'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth',
'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth',
'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth',
'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth',
'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth',
'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth',
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print('Checkpoint URL: ', checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')['model']
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()
    # load state dict
    model.load_state_dict(state_dict)
    # verify conversion
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    processor = BitImageProcessor(
        do_resize=True, size={'shortest_edge': 256}, resample=PILImageResampling.BILINEAR, do_center_crop=True, crop_size=224, do_normalize=True, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD, )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors='pt')
    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ] )
    original_pixel_values = image_transforms(image).unsqueeze(0)
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1E-4)
    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print('Predicted class:', model.config.id2label[predicted_class_idx])
    print('First values of logits:', outputs.logits[0, :3])
    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4)
    print('Looks ok!')
    if pytorch_dump_folder_path is not None:
        print(F'Saving model and processor of {model_name} to {pytorch_dump_folder_path}')
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(F'Pushing model and processor of {model_name} to the hub...')
        model.push_to_hub(F'{model_name}')
        processor.push_to_hub(F'{model_name}')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
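# Example invocation (the script filename and output directory below are
# illustrative assumptions, not taken from this file):
#   python convert_focalnet_to_hf_format.py \
#       --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny-converted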
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    """Count tile totals t <= t_limit that can form between 1 and n_limit
    distinct hollow square laminae (Project Euler problem 174)."""
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole must have the same parity as the outer square
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn.grep_linear''': '''encoder.layers.*.attention.gru_rel_pos_linear''',
'''self_attn.relative_attention_bias''': '''encoder.layers.*.attention.rel_attn_embed''',
'''self_attn.grep_a''': '''encoder.layers.*.attention.gru_rel_pos_const''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Assign ``value`` to the attribute of ``hf_pointer`` addressed by ``key``."""
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f' {value.shape} for {full_name}'
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model):
    """Copy every weight of the fairseq WavLM model into the HF model."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f'Unused weights: {unused_weights}')
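# For illustration: a fairseq key such as "encoder.layers.3.self_attn.k_proj.weight"
# matches the MAPPING entry "self_attn.k_proj" -> "encoder.layers.*.attention.k_proj";
# the "*" is then filled with the extracted layer index, yielding
# "encoder.layers.3.attention.k_proj" with weight_type "weight".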
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Load one convolutional feature-extractor layer from the fairseq checkpoint."""
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    """Copy/paste/tweak model's weights to transformers design."""
    # load the pre-trained checkpoint
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint['cfg'])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint['model'])
    model.eval()
    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()
    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
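# Example invocation (illustrative paths; the script filename is an assumption):
#   python convert_wavlm_checkpoint.py \
#       --checkpoint_path ./WavLM-Base.pt \
#       --pytorch_dump_folder_path ./wavlm-base-converted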
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = 'gelu'
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict['input_ids']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['attention_mask'][:1, :]
        head_mask = inputs_dict['head_mask']
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict( config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
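# For illustration (hypothetical values): with pad_token_id = 1 and
# input_ids = [[5, 6, 1]], the derived attention_mask is [[1, 1, 0]]:
# real tokens get 1 and padding positions get 0.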
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            'conversational': TFMBartForConditionalGeneration,
            'feature-extraction': TFMBartModel,
            'summarization': TFMBartForConditionalGeneration,
            'text2text-generation': TFMBartForConditionalGeneration,
            'translation': TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True
        return False
    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        ' UN Chief Says There Is No Military Solution in Syria',
    ]
    expected_text = [
        'Şeful ONU declară că nu există o soluţie militară în Siria',
    ]
    model_name = 'facebook/mbart-large-en-ro'
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)
    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model
    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)
    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors='tf')
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2)
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words
    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_relpos_bias_lookup(params, i, prefix):
    """Returns the relative position bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase="attention" ) -> Tuple:
"""simple docstring"""
lowerCamelCase__ : Dict = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :] )
lowerCamelCase__ : Tuple = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
lowerCamelCase__ : Union[str, Any] = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :] )
lowerCamelCase__ : Optional[int] = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
lowerCamelCase__ : List[Any] = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :] )
lowerCamelCase__ : Any = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
lowerCamelCase__ : Optional[Any] = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :] )
lowerCamelCase__ : Any = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo
def tax_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_tax_to_pytorch(variables, *, num_layers, is_encoder_only, scalable_attention=False):
    """Converts the flattened T5X parameters to a PyTorch-style state dict."""
    old = traverse_util.flatten_dict(variables['target'])
    old = {'/'.join(k): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = 'encoder/encoder/mlp/wi_0/kernel' in old
    print('Split MLP:', split_mlp_wi)
    new = collections.OrderedDict()
    # Shared embeddings.
    new['shared.weight'] = old['token_embedder/embedding']
    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old, i, 'encoder', 'pre_attention_layer_norm')
        k, o, q, v = tax_attention_lookup(old, i, 'encoder', 'attention')
        new[f'encoder.block.{i}.layer.0.layer_norm.weight'] = layer_norm
        new[f'encoder.block.{i}.layer.0.SelfAttention.k.weight'] = k.T
        new[f'encoder.block.{i}.layer.0.SelfAttention.o.weight'] = o.T
        new[f'encoder.block.{i}.layer.0.SelfAttention.q.weight'] = q.T
        new[f'encoder.block.{i}.layer.0.SelfAttention.v.weight'] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old, i, 'encoder', 'pre_mlp_layer_norm')
        wi, wo = tax_mlp_lookup(old, i, 'encoder', split_mlp_wi)
        new[f'encoder.block.{i}.layer.1.layer_norm.weight'] = layer_norm
        if split_mlp_wi:
            new[f'encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight'] = wi[0].T
            new[f'encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight'] = wi[1].T
        else:
            new[f'encoder.block.{i}.layer.1.DenseReluDense.wi.weight'] = wi.T
        new[f'encoder.block.{i}.layer.1.DenseReluDense.wo.weight'] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f'encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight'] = tax_relpos_bias_lookup(
                old, i, 'encoder').T
    new['encoder.final_layer_norm.weight'] = old['encoder/encoder_norm/scale']
    if not scalable_attention:
        new['encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight'] = tax_relpos_bias_lookup(
            old, 0, 'encoder').T
        new['decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight'] = tax_relpos_bias_lookup(
            old, 0, 'decoder').T
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old, i, 'decoder', 'pre_self_attention_layer_norm')
            k, o, q, v = tax_attention_lookup(old, i, 'decoder', 'self_attention')
            new[f'decoder.block.{i}.layer.0.layer_norm.weight'] = layer_norm
            new[f'decoder.block.{i}.layer.0.SelfAttention.k.weight'] = k.T
            new[f'decoder.block.{i}.layer.0.SelfAttention.o.weight'] = o.T
            new[f'decoder.block.{i}.layer.0.SelfAttention.q.weight'] = q.T
            new[f'decoder.block.{i}.layer.0.SelfAttention.v.weight'] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old, i, 'decoder', 'pre_cross_attention_layer_norm')
            k, o, q, v = tax_attention_lookup(old, i, 'decoder', 'encoder_decoder_attention')
            new[f'decoder.block.{i}.layer.1.layer_norm.weight'] = layer_norm
            new[f'decoder.block.{i}.layer.1.EncDecAttention.k.weight'] = k.T
            new[f'decoder.block.{i}.layer.1.EncDecAttention.o.weight'] = o.T
            new[f'decoder.block.{i}.layer.1.EncDecAttention.q.weight'] = q.T
            new[f'decoder.block.{i}.layer.1.EncDecAttention.v.weight'] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old, i, 'decoder', 'pre_mlp_layer_norm')
            wi, wo = tax_mlp_lookup(old, i, 'decoder', split_mlp_wi)
            new[f'decoder.block.{i}.layer.2.layer_norm.weight'] = layer_norm
            if split_mlp_wi:
                new[f'decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight'] = wi[0].T
                new[f'decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight'] = wi[1].T
            else:
                new[f'decoder.block.{i}.layer.2.DenseReluDense.wi.weight'] = wi.T
            new[f'decoder.block.{i}.layer.2.DenseReluDense.wo.weight'] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f'decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight'] = tax_relpos_bias_lookup(old, i, 'decoder').T
        new['decoder.final_layer_norm.weight'] = old['decoder/decoder_norm/scale']
        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new['lm_head.weight'] = old['decoder/logits_dense/kernel'].T
    return new
def make_state_dict(converted_params, is_encoder_only):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict['encoder.embed_tokens.weight'] = state_dict['shared.weight']
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict['decoder.embed_tokens.weight'] = state_dict['shared.weight']
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print('Using shared word embeddings as lm_head.')
            state_dict['lm_head.weight'] = state_dict['shared.weight']
    return state_dict
def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the model's weights with those stored in the T5X checkpoint."""
    variables = checkpoints.load_t5x_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_tax_checkpoint_to_pytorch(
    tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False, scalable_attention=False, ):
    """Loads a T5X checkpoint, converts it, and saves it as a PyTorch model."""
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print('Done')
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False
)
parser.add_argument(
'--scalable_attention',
action='store_true',
help='Whether the model uses scaled attention (umt5 model)',
default=False,
)
    args = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
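# Example invocation (illustrative paths; the script filename is an assumption):
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path ./umt5-small-t5x \
#       --config_file ./umt5-small/config.json \
#       --pytorch_dump_path ./umt5-small-converted \
#       --scalable_attention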
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_pad=True, ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w)
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
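    # Worked example (illustrative): with size {"shortest_edge": 18}, a 30 x 40
    # (w x h) image keeps its aspect ratio, so the expected width is 18 and the
    # expected height is int(18 * 40 / 30) = 24.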
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'do_rescale'))
        self.assertTrue(hasattr(image_processing, 'rescale_factor'))
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_pad'))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1333})
        self.assertEqual(image_processor.do_pad, True)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False)
        self.assertEqual(image_processor.size, {'shortest_edge': 42, 'longest_edge': 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
@slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt', 'r') as f:
            target = json.loads(f.read())
        target = {'image_id': 39769, 'annotations': target}
        # encode them
        image_processing = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50')
        encoding = image_processing(images=image, annotations=target, return_tensors='pt')
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))
@slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt', 'r') as f:
            target = json.loads(f.read())
        target = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
        masks_path = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')
        # encode them
        image_processing = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic')
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors='pt')
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding['labels'][0]['masks'].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 650, "eval_accuracy": 0.6, "eval_loss": 0.9},
},
{
"framework": "tensorflow",
"script": "run_tf.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.3, "eval_loss": 0.9},
},
] )
class SingleNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(), encoding='utf-8', check=True, )
        assert hasattr(self, 'env')
    def create_estimator(self, instance_count=1):
        return HuggingFace(
            entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=f"{self.env.base_job_name}-single", instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={**self.env.hyperparameters, 'model_name_or_path': self.model_name_or_path}, metric_definitions=self.env.metric_definitions, py_version='py36', )
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
    def test_glue(self):
        estimator = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get('TrainingTimeInSeconds', 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy)
        assert all(t <= self.results['eval_loss'] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", 'w') as outfile:
            json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss}, outfile)
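# For illustration (hypothetical numbers): the dumped JSON takes the form
# {"train_time": 642.0, "eval_accuracy": [0.61], "eval_loss": [0.87]}.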
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)
def normalize_box(box, width, height):
return [
int(10_00 * (box[0] / width)),
int(10_00 * (box[1] / height)),
int(10_00 * (box[2] / width)),
int(10_00 * (box[3] / height)),
]
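# Quick sanity check of the 0-1000 box normalization used by LayoutLM-style models
# (plain arithmetic on the function above, no OCR dependency needed):
#   normalize_box([15, 20, 25, 30], width=100, height=200) == [150, 100, 250, 150]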
def apply_tesseract(image, lang=None, tesseract_config=None):
    """Applies Tesseract OCR on a document image and returns recognized words plus normalized bounding boxes."""
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_boxes.append([x, y, x + w, y + h])

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class LayoutLMv2ImageProcessor(BaseImageProcessor):
    """
    Image processor for LayoutLMv2-style document models: resizes document images and
    (optionally) runs Tesseract OCR to obtain words and normalized bounding boxes.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        apply_ocr=True,
        ocr_lang=None,
        tesseract_config="",
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        # `resize` here resolves to the module-level function imported from image_transforms
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        apply_ocr=None,
        ocr_lang=None,
        tesseract_config=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
| 128 | 0 |
import warnings

from ...utils import logging
from .image_processing_beit import BeitImageProcessor


logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 368 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }


@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 ) | 196 | 0 |
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
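# Minimal usage sketch (derived from the constructor above; `out_features` round-trips
# through the BackboneConfigMixin properties):
#   config = ResNetConfig(out_features=["stage2", "stage4"])
#   config.stage_names  -> ["stem", "stage1", "stage2", "stage3", "stage4"]
#   config.out_features -> ["stage2", "stage4"]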
| 31 |
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score

import datasets
a ="""\
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
"""
a ="""\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
"""
a ="""
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
predictions: list of predictions to score (as int64),
except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).
references: list of ground truth labels corresponding to the predictions (as int64),
except for 'cvit-mkb-clsr' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
\"accuracy\": Accuracy
\"f1\": F1 score
\"precision\": Precision@10
Examples:
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')
>>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'precision@10': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class IndicGlue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "cvit-mkb-clsr",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
            "wiki-ner",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64")
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32")),
                    "references": datasets.Value("int64")
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32")),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if self.config_name != "cvit-mkb-clsr" else None,
        )

    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
        ]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]'
            )
| 73 | 0 |
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNext does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNext does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNext does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
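# Note: the generic backbone checks (stage names, channel counts, forward outputs)
# come from BackboneTesterMixin; this class only wires in the shared model tester.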
| 368 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def mobius(n: int) -> int:
    """Möbius function: 1/-1 for square-free n (even/odd prime factor count), else 0."""
    factors = prime_factors(n)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
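# Hand-verified values for the function above:
#   mobius(1) == 1   (empty factorization, even count)
#   mobius(2) == -1  (one prime factor)
#   mobius(6) == 1   (2 * 3, square-free, even count)
#   mobius(12) == 0  (2^2 * 3, not square-free)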
| 276 | 0 |
class Graph:
    def __init__(self):
        self.vertex = {}

    def print_graph(self):
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex, to_vertex):
        # check if the vertex is already present
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self):
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)

        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex, visited):
        # mark start vertex as visited
        visited[start_vertex] = True

        print(start_vertex, end=" ")

        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex[start_vertex]:
            if not visited[i]:
                self.dfs_recursive(i, visited)


if __name__ == "__main__":
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)

    g.print_graph()
    print("DFS:")
    g.dfs()

    # OUTPUT:
    # 0 -> 1 -> 2
    # 1 -> 2
    # 2 -> 0 -> 3
    # 3 -> 3
    # DFS:
    # 0 1 2 3
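# A minimal iterative equivalent of Graph.dfs_recursive, using an explicit stack
# (assumes the Graph class above; neighbors are pushed in reverse so the visit
# order matches the recursive version):
def dfs_iterative(graph, start_vertex):
    visited = [False] * len(graph.vertex)
    stack = [start_vertex]
    while stack:
        vertex = stack.pop()
        if visited[vertex]:
            continue
        visited[vertex] = True
        print(vertex, end=" ")
        for neighbor in reversed(graph.vertex[vertex]):
            if not visited[neighbor]:
                stack.append(neighbor)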
| 59 |
"""simple docstring"""
def _A ( UpperCamelCase_ : Any) -> List[str]:
'''simple docstring'''
__lowercase ,__lowercase = [], []
while len(UpperCamelCase_) > 1:
__lowercase ,__lowercase = min(UpperCamelCase_), max(UpperCamelCase_)
start.append(UpperCamelCase_)
end.append(UpperCamelCase_)
collection.remove(UpperCamelCase_)
collection.remove(UpperCamelCase_)
end.reverse()
return start + collection + end
if __name__ == "__main__":
_a = input('Enter numbers separated by a comma:\n').strip()
_a = [int(item) for item in user_input.split(',')]
print(*merge_sort(unsorted), sep=',')
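# Worked example (each pass strips one minimum and one maximum, so the loop runs
# about n/2 times with O(n) scans each -- O(n^2) overall despite the file name):
#   merge_sort([0, 5, 3, 2, 2]) -> [0, 2, 2, 3, 5]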
| 17 | 0 |
def infix_2_postfix(infix: str) -> str:
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:
                # while priority of x is not > priority of element in the stack;
                # the "(" guard prevents a KeyError when a parenthesis sits on top
                while len(stack) > 0 and stack[-1] != "(" and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack

        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix: str) -> str:
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    # call infix_2_postfix on the reversed equation, then reverse the Postfix result
    return (infix_2_postfix("".join(infix)))[::-1]


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
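# Worked example, hand-traced through infix_2_postfix above (table printing elided;
# the parenthesis guard keeps "(" from being looked up in the priority dict):
#   infix_2_postfix("a+b*(c^d-e)") -> "abcd^e-*+"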
| 78 |
from ..utils import DummyObject, requires_backends


# NOTE: this module originally defined one placeholder class per torch-backed object,
# each raising an informative error when `torch` is unavailable. In this copy every
# class was collapsed onto the same name (so only the last definition would survive),
# and the individual class names are no longer recoverable. A single representative
# template is kept below; the real module simply repeats this pattern once per public
# torch-backed class and function.
class DummyTorchObject(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


# Module-level placeholders follow the same pattern (one per public torch-backed
# function in the original; their names are likewise unrecoverable from this copy):
def dummy_torch_function(*args, **kwargs):
    requires_backends(dummy_torch_function, ["torch"])
| 78 | 1 |
"""simple docstring"""
def split(string: str, separator: str = " ") -> list:
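    """
    Split a string on a separator, keeping the last fragment even when the
    string does not end with the separator.

    >>> split("apple#banana#cherry#orange", separator="#")
    ['apple', 'banana', 'cherry', 'orange']
    >>> split("Hello there")
    ['Hello', 'there']
    """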
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
| 16 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)
    def __call__(self):
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)
    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None
    def __call__(self):
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) "
                f"are not in valid set ({', '.join(lang_set)})."
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))
return {"language": languages, "translation": translations}
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
}
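# Usage sketch (values follow from `encode_example` above; output tuples are
# sorted by language code, then by text):
#   feature = TranslationVariableLanguages(languages=["de", "en", "fr"])
#   feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"], "de": "die katze"})
#   -> {"language": ("de", "en", "fr", "fr"),
#       "translation": ("die katze", "the cat", "la chatte", "le chat")}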
| 16 | 1 |
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    size = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]

    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def interpolate(data_points: list[int]) -> Callable[[int], int]:
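    """
    Fit the minimal-degree polynomial through the points
    (1, data_points[0]), (2, data_points[1]), ... and return it as a callable.

    >>> interpolate([1])(3)
    1
    >>> interpolate([1, 8, 27])(4)
    58
    """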
    size = len(data_points)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]

    for x_val, y_val in enumerate(data_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func
def question_function(variable: int) -> int:
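    """
    The generating function u(n) = 1 - n + n^2 - ... + n^10 from Project Euler 101,
    which equals (n^11 + 1) / (n + 1).

    >>> question_function(1)
    1
    >>> question_function(2)
    683
    """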
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    data_points = [func(x_val) for x_val in range(1, order + 1)]
    polynomials = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret = 0
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
print(F'{solution() = }')
| 347 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
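# `_import_structure` maps each submodule to the names it exports; `_LazyModule`
# (instantiated at the bottom of this file) uses it to defer the heavy framework
# imports until one of these attributes is actually accessed.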
_import_structure = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 347 | 1 |
"""simple docstring"""
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
MODEL = "base_with_context"
def load_notes_encoder(weights, model):
lowerCAmelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(weights['''token_embedder''']['''embedding'''] ) )
lowerCAmelCase : Optional[Any] = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=_snake_case )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
lowerCAmelCase : Any = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
        attention_weights = ly_weight["attention"]
lowerCAmelCase : str = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
lowerCAmelCase : Dict = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
lowerCAmelCase : str = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
lowerCAmelCase : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
lowerCAmelCase : str = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
lowerCAmelCase : Any = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
lowerCAmelCase : int = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
lowerCAmelCase : List[str] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
lowerCAmelCase : List[str] = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
return model
def load_continuous_encoder(weights, model):
lowerCAmelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(weights['''input_proj''']['''kernel'''].T ) )
lowerCAmelCase : str = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=_snake_case )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]
lowerCAmelCase : int = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
lowerCAmelCase : List[str] = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
lowerCAmelCase : List[str] = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
lowerCAmelCase : Any = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
lowerCAmelCase : List[Any] = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
lowerCAmelCase : str = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
lowerCAmelCase : int = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
lowerCAmelCase : Tuple = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
lowerCAmelCase : str = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
lowerCAmelCase : int = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
return model
def load_decoder(weights, model):
lowerCAmelCase : Any = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense0''']['''kernel'''].T ) )
lowerCAmelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense1''']['''kernel'''].T ) )
lowerCAmelCase : Tuple = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=_snake_case )
lowerCAmelCase : Any = nn.Parameter(
torch.FloatTensor(weights['''continuous_inputs_projection''']['''kernel'''].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
lowerCAmelCase : List[str] = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_self_attention_layer_norm''']['''scale'''] ) )
lowerCAmelCase : List[str] = nn.Parameter(
torch.FloatTensor(ly_weight['''FiLMLayer_0''']['''DenseGeneral_0''']['''kernel'''].T ) )
        attention_weights = ly_weight["self_attention"]
lowerCAmelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
lowerCAmelCase : Dict = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
lowerCAmelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
lowerCAmelCase : Any = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
lowerCAmelCase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
lowerCAmelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
lowerCAmelCase : List[str] = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
lowerCAmelCase : int = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
lowerCAmelCase : int = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_cross_attention_layer_norm''']['''scale'''] ) )
lowerCAmelCase : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
lowerCAmelCase : Any = nn.Parameter(
torch.FloatTensor(ly_weight['''FiLMLayer_1''']['''DenseGeneral_0''']['''kernel'''].T ) )
lowerCAmelCase : str = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
lowerCAmelCase : str = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
lowerCAmelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
lowerCAmelCase : str = nn.Parameter(torch.FloatTensor(weights['''decoder_norm''']['''scale'''] ) )
lowerCAmelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(weights['''spec_out_dense''']['''kernel'''].T ) )
return model
def main(args):
    ta_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    ta_checkpoint = jnp.tree_util.tree_map(onp.array, ta_checkpoint)
    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]
    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(max_length=synth_model.sequence_length["inputs"], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu")

    continuous_encoder = SpectrogramContEncoder(input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["targets_context"], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu")

    decoder = T5FilmDecoder(input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["targets_context"], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate)

    notes_encoder = load_notes_encoder(ta_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(ta_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan)
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
snake_case__ : Any = argparse.ArgumentParser()
parser.add_argument('''--output_path''', default=None, type=str, required=True, help='''Path to the converted model.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument(
'''--checkpoint_path''',
default=f"""{MODEL}/checkpoint_500000""",
type=str,
required=False,
help='''Path to the original jax model checkpoint.''',
)
snake_case__ : Dict = parser.parse_args()
main(args)
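# Example invocation (script name and paths are illustrative):
#   python convert_music_spectrogram_to_diffusers.py \
#       --checkpoint_path /path/to/base_with_context/checkpoint_500000 \
#       --output_path ./spectrogram_diffusion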
| 60 |
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline(DiffusionPipeline):
    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, eta: float = 0.0, num_inference_steps: int = 50, output_type: Optional[str] = "pil", return_dict: bool = True, **kwargs):
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), generator=generator)
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
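# Usage sketch (the checkpoint id is illustrative of an unconditional LDM checkpoint):
#   pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
#   image = pipe(num_inference_steps=50).images[0]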
| 60 | 1 |
'''simple docstring'''
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp(self):
super().setUp()
        vocab_tokens = [
'<unk>',
'<cls>',
'<sep>',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
| 351 |
'''simple docstring'''
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    def __init__(self, number_of_queues: int, time_slices: list[int], queue: deque[Process], current_time: int) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()
    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence
    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times
    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times
    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times
    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]
    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished: deque[Process] = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin(self, ready_queue: deque[Process], time_slice: int) -> tuple[deque[Process], deque[Process]]:
        finished: deque[Process] = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
if __name__ == "__main__":
import doctest
_A : Optional[Any] =Process('''P1''', 0, 53)
_A : List[Any] =Process('''P2''', 0, 17)
_A : Any =Process('''P3''', 0, 68)
_A : Tuple =Process('''P4''', 0, 24)
_A : int =3
_A : Tuple =[17, 25]
_A : List[Any] =deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])})
_A : Dict =Process('''P1''', 0, 53)
_A : Union[str, Any] =Process('''P2''', 0, 17)
_A : int =Process('''P3''', 0, 68)
_A : Dict =Process('''P4''', 0, 24)
_A : List[str] =3
_A : List[Any] =[17, 25]
_A : Any =deque([Pa, Pa, Pa, Pa])
_A : List[str] =MLFQ(number_of_queues, time_slices, queue, 0)
_A : Union[str, Any] =mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F'waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print completion times of processes(P1, P2, P3, P4)
print(
F'completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F'turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print sequence of finished processes
print(
F'sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'
)
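    # For the sample workload above (P1=53, P2=17, P3=68, P4=24 with time
    # slices [17, 25]), the finish sequence works out to ['P2', 'P4', 'P1', 'P3']:
    # P2 completes inside the first round-robin queue, P4 inside the second,
    # and P1 and P3 are drained by the final FCFS queue.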
| 129 | 0 |
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"""split_dict""" , [
SplitDict(),
SplitDict({"""train""": SplitInfo(name="""train""" , num_bytes=1_337 , num_examples=42 , dataset_name="""my_dataset""" )} ),
SplitDict({"""train""": SplitInfo(name="""train""" , num_bytes=1_337 , num_examples=42 )} ),
SplitDict({"""train""": SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info: SplitInfo):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the
    # "dataset_name" field even if it's deprecated. This way old versions of `datasets` can still reload
    # dataset_infos.json files.
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 35 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}
    def setUp(self):
        super().setUp()
        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text
    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_tokens = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_tokens)

        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)

        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)
    def test_token_bagoftoken(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        ids = tokenizer.encode(input_text)
        output_text = tokenizer.decode(ids)
        self.assertEqual(output_text, expected_text)
@slow
    def test_prefix_input_token(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"
        ids_1 = tokenizer.encode(prefix_text + input_text)
        ids_2 = tokenizer.encode("", prefix_text=prefix_text + input_text)
        ids_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        text_1 = tokenizer.decode(ids_1)
        text_2 = tokenizer.decode(ids_2)
        text_3 = tokenizer.decode(ids_3)
        self.assertEqual(text_1, expected_text)
        self.assertEqual(text_2, expected_text)
        self.assertEqual(text_3, expected_text)
@slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_ids_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_ids_2 = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids
        type_ids_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_ids_1, expected_mask_1)
        self.assertListEqual(type_ids_2, expected_mask_2)
        self.assertListEqual(type_ids_3, expected_mask_3)
@slow
    def test_prompt_token(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        x_token_1 = tokenizer.encode("あンいワ")
        x_token_2 = tokenizer.encode("", prefix_text="あンいワ")
        x_token_3 = tokenizer.encode("いワ", prefix_text="あン")
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_2[1], x_token_2[-1])  # SEG token
        self.assertEqual(x_token_2[1], x_token_3[3])  # SEG token
@slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        token_texts = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(token_texts, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(token_texts, padding=True)

        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs)
        self.assertListEqual(x_token.token_type_ids, expected_typeids)
        self.assertListEqual(x_token.attention_mask, expected_attmask)
        self.assertListEqual(x_token_2.input_ids, expected_outputs)
        self.assertListEqual(x_token_2.token_type_ids, expected_typeids)
        self.assertListEqual(x_token_2.attention_mask, expected_attmask)
    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
| 35 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_bridgetower": [
"BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BridgeTowerConfig",
"BridgeTowerTextConfig",
"BridgeTowerVisionConfig",
],
"processing_bridgetower": ["BridgeTowerProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bridgetower"] = [
"BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST",
"BridgeTowerForContrastiveLearning",
"BridgeTowerForImageAndTextRetrieval",
"BridgeTowerForMaskedLM",
"BridgeTowerModel",
"BridgeTowerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 370 |
"""simple docstring"""
def solution(max_base: int = 10, max_power: int = 22) -> int:
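    """
    Project Euler 63: count the positive integers that are both an n-digit
    number and an nth power.

    >>> solution(10, 22)
    49
    """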
    bases = range(1, max_base)
    powers = range(1, max_power)
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
    print(f"{solution(10, 22) = }")
| 239 | 0 |
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
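# Both branches above permute the fused QKV parameter into a common
# [num_splits * num_heads * hidden_size, :] row layout (all query rows first,
# then keys, then values), which is the ordering the GPT-2 conversion below
# relies on when building the attention projection.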
def convert_megatron_checkpoint(args, input_state_dict, config):
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))
        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)

        # Stop if that's not a layer
        if m is None:
            break

        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)

        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val

        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask

            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias

            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val

        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val

        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)

        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For the LM head, transformers wants the matrix tied to the word embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict
def _SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
'''simple docstring'''
__UpperCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument("--print-checkpoint-structure" , action="store_true")
parser.add_argument(
"path_to_checkpoint" , type=_lowerCamelCase , help="Path to the checkpoint file (.zip archive or direct .pt file)" , )
parser.add_argument(
"--config_file" , default="" , type=_lowerCamelCase , help="An optional config json file describing the pre-trained model." , )
__UpperCamelCase : List[Any] = parser.parse_args()
# Extract the basename.
__UpperCamelCase : Dict = os.path.dirname(args.path_to_checkpoint)
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(F'Extracting PyTorch state dictionary from {args.path_to_checkpoint}')
if args.path_to_checkpoint.endswith(".zip"):
with zipfile.ZipFile(args.path_to_checkpoint , "r") as checkpoint:
with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
__UpperCamelCase : List[str] = torch.load(_lowerCamelCase , map_location="cpu")
else:
__UpperCamelCase : str = torch.load(args.path_to_checkpoint , map_location="cpu")
__UpperCamelCase : List[str] = input_state_dict.get("args" , _lowerCamelCase)
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
__UpperCamelCase : int = 'gelu_fast'
elif ds_args.openai_gelu:
__UpperCamelCase : str = 'gelu_new'
else:
__UpperCamelCase : Optional[int] = 'gelu'
else:
# in the very early days this used to be "gelu_new"
__UpperCamelCase : List[str] = 'gelu_new'
# Spell out all parameters in case the defaults change.
__UpperCamelCase : str = GPTaConfig(
vocab_size=50_257 , n_positions=1_024 , n_embd=1_024 , n_layer=24 , n_head=16 , n_inner=4_096 , activation_function=_lowerCamelCase , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.0_2 , summary_type="cls_index" , summary_use_proj=_lowerCamelCase , summary_activation=_lowerCamelCase , summary_proj_to_labels=_lowerCamelCase , summary_first_dropout=0.1 , scale_attn_weights=_lowerCamelCase , use_cache=_lowerCamelCase , bos_token_id=50_256 , eos_token_id=50_256 , )
else:
__UpperCamelCase : List[str] = GPTaConfig.from_json_file(args.config_file)
__UpperCamelCase : Optional[int] = ['GPT2LMHeadModel']
# Convert.
print("Converting")
__UpperCamelCase : Tuple = convert_megatron_checkpoint(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(_lowerCamelCase , _lowerCamelCase)
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
__UpperCamelCase : Any = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
__UpperCamelCase : Optional[int] = 'gpt2'
elif tokenizer_type == "PretrainedFromHF":
__UpperCamelCase : Any = ds_args.tokenizer_name_or_path
else:
raise ValueError(F'Unrecognized tokenizer_type {tokenizer_type}')
else:
__UpperCamelCase : Any = 'gpt2'
__UpperCamelCase : Tuple = AutoTokenizer.from_pretrained(_lowerCamelCase)
__UpperCamelCase : Union[str, Any] = type(_lowerCamelCase).__name__
__UpperCamelCase : List[str] = tokenizer_class
# Store the config to file.
print("Saving config")
config.save_pretrained(_lowerCamelCase)
# Save tokenizer based on args
print(F'Adding {tokenizer_class} tokenizer files')
tokenizer.save_pretrained(_lowerCamelCase)
# Store the state_dict to file.
__UpperCamelCase : int = os.path.join(_lowerCamelCase , "pytorch_model.bin")
print(F'Saving checkpoint to \"{output_checkpoint_file}\"')
torch.save(_lowerCamelCase , _lowerCamelCase)
####################################################################################################
if __name__ == "__main__":
main()
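# Usage sketch (hypothetical paths; the optional structure-printing flag is
# inferred from `args.print_checkpoint_structure` above, so treat its exact
# spelling as an assumption):
#
#   python convert_megatron_gpt2_checkpoint.py \
#       /path/to/release/mp_rank_00/model_optim_rng.pt
#
# The converted config.json, tokenizer files and pytorch_model.bin are
# written next to the input checkpoint.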
####################################################################################################
 | 232 | 
import gc
import unittest

from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the accelerator memory after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
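# Minimal sketch of the data-parallel pattern the tests above rely on,
# outside the test harness. Assumes a working JAX/Flax install; the array
# contents are dummies chosen only for illustration.
def _sharding_demo():
    params = {"w": jnp.ones((2, 2))}
    params = replicate(params)  # copy the params pytree to every device
    batch = jnp.ones((jax.device_count() * 4, 8))
    batch = shard(batch)  # split the leading batch axis across devices
    rng = jax.random.split(jax.random.PRNGKey(0), jax.device_count())  # one PRNG key per device
    return params, batch, rng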
| 175 | 0 |
import gc
import math
import unittest

import torch

from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin


logger = logging.get_logger(__name__)

enable_full_determinism()


class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict


class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)

        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate(self):
        model, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate_wont_change_results(self):
        # by default model loading will use accelerate as `low_cpu_mem_usage=True`
        model_accelerate, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()

        noise = torch.randn(
            1,
            model_accelerate.config.in_channels,
            model_accelerate.config.sample_size,
            model_accelerate.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        arr_accelerate = model_accelerate(noise, time_step)["sample"]

        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()

        model_normal_load, _ = UNet2DModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False
        )
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise, time_step)["sample"]

        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)

    def test_output_pretrained(self):
        model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)

        noise = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))


class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int64, device=torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64, 64, 64],
            "in_channels": 3,
            "layers_per_block": 1,
            "out_channels": 3,
            "time_embedding_type": "fourier",
            "norm_eps": 1e-6,
            "mid_block_scale_factor": math.sqrt(2.0),
            "norm_num_groups": None,
            "down_block_types": [
                "SkipDownBlock2D",
                "AttnSkipDownBlock2D",
                "SkipDownBlock2D",
                "SkipDownBlock2D",
            ],
            "up_block_types": [
                "SkipUpBlock2D",
                "SkipUpBlock2D",
                "AttnSkipUpBlock2D",
                "SkipUpBlock2D",
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    @slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs["sample"] = noise
        image = model(**inputs)

        assert image is not None, "Make sure output is not None"

    @slow
    def test_output_pretrained_ve_mid(self):
        model = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (256, 256)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_output_pretrained_ve_large(self):
        model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_forward_with_norm_groups(self):
        # not required for this model
        pass
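# Standalone sketch of a single UNet2DModel forward pass, mirroring the
# checks above. The config values are illustrative, not taken from the tests.
def _unet_forward_demo():
    model = UNet2DModel(
        sample_size=32,
        in_channels=3,
        out_channels=3,
        layers_per_block=1,
        block_out_channels=(32, 64),
        down_block_types=("DownBlock2D", "AttnDownBlock2D"),
        up_block_types=("AttnUpBlock2D", "UpBlock2D"),
    )
    sample = torch.randn(1, 3, 32, 32)  # noisy image batch
    timestep = torch.tensor([10])  # diffusion timestep
    with torch.no_grad():
        return model(sample, timestep).sample  # predicted noise residual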
| 109 |
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    SwiftFormerConfig,
    SwiftFormerForImageClassification,
    ViTImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

device = torch.device("cpu")


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def get_expected_output(swiftformer_name):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys


@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swiftformer_name",
        default="swiftformer_xs",
        choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
        type=str,
        help="Name of the SwiftFormer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="./converted_outputs/",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")

    args = parser.parse_args()
    convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
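# Example invocation (script name and checkpoint path are hypothetical; the
# flags are the ones defined just above):
#
#   python convert_swiftformer_original_to_hf.py \
#       --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ \
#       --original_ckpt /path/to/swiftformer_xs.pth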
| 109 | 1 |
import argparse

import numpy as np
import torch

from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging


logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")


def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    args = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
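# Example invocation (all paths hypothetical):
#
#   python convert_hifigan.py \
#       --checkpoint_path /path/to/generator_checkpoint.pt \
#       --stats_path /path/to/stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan
#
# stats.npy is expected to hold the mel-spectrogram mean in row 0 and the
# scale in row 1, matching how convert_hifigan_checkpoint() reads it above.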
| 322 |
import os
from datetime import datetime as dt

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "feature request",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state="closed")
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
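# The script authenticates with a personal access token read from the
# GITHUB_TOKEN environment variable in main() above, e.g.:
#
#   GITHUB_TOKEN=<your token> python stale.py
#
# (the file name "stale.py" is an assumption for illustration)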
| 322 | 1 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
    "tokenization_ctrl": ["CTRLTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ctrl"] = [
        "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CTRLForSequenceClassification",
        "CTRLLMHeadModel",
        "CTRLModel",
        "CTRLPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_ctrl"] = [
        "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCTRLForSequenceClassification",
        "TFCTRLLMHeadModel",
        "TFCTRLModel",
        "TFCTRLPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
    from .tokenization_ctrl import CTRLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ctrl import (
            CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            CTRLForSequenceClassification,
            CTRLLMHeadModel,
            CTRLModel,
            CTRLPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_ctrl import (
            TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCTRLForSequenceClassification,
            TFCTRLLMHeadModel,
            TFCTRLModel,
            TFCTRLPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 368 |
from __future__ import annotations

import math


def ucal(u: float, p: int) -> float:
    """Compute the u term of the Newton forward-difference formula:
    u * (u - 1) * ... * (u - p + 1)."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")


if __name__ == "__main__":
    main()
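# Non-interactive sketch of the same forward-difference interpolation on a
# fixed table: f(x) = x**2 sampled at x = 0, 1, 2, 3 (values chosen only for
# illustration).
def _interpolate_demo(value=1.5):
    x = [0, 1, 2, 3]
    y = [[0.0] * 4 for _ in range(4)]
    for i, fx in enumerate([0.0, 1.0, 4.0, 9.0]):
        y[i][0] = fx
    u = (value - x[0]) / (x[1] - x[0])
    # build the forward difference table
    for i in range(1, 4):
        for j in range(4 - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, 4):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    return summ  # 2.25 for value=1.5, since f(x) = x**2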
| 88 | 0 |
import os
import random
import sys

from . import cryptomath_module as cryptoMath  # noqa: N812
from . import rabin_miller as rabinMiller  # noqa: N812


def main() -> None:
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")


def generate_key(key_size: int) -> tuple[tuple[int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")


if __name__ == "__main__":
    main()
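# Programmatic sketch: generate a key pair directly instead of writing key
# files. 1024 bits matches main(); generating primes of this size is slow.
def _keygen_demo():
    public_key, private_key = generate_key(1024)
    n, e = public_key
    _, d = private_key
    # RSA round trip: encrypting then decrypting recovers the message
    m = 42
    assert pow(pow(m, e, n), d, n) == m
    return public_key, private_key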
| 39 |
"""simple docstring"""
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if exponent == 1:
return base
if exponent % 2 == 0:
UpperCamelCase = _modexpt(_SCREAMING_SNAKE_CASE , exponent // 2 , _SCREAMING_SNAKE_CASE ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(_SCREAMING_SNAKE_CASE , exponent - 1 , _SCREAMING_SNAKE_CASE )) % modulo_value
def a__ ( _SCREAMING_SNAKE_CASE = 1_777 , _SCREAMING_SNAKE_CASE = 1_855 , _SCREAMING_SNAKE_CASE = 8 ):
"""simple docstring"""
UpperCamelCase = base
for _ in range(1 , _SCREAMING_SNAKE_CASE ):
UpperCamelCase = _modexpt(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 10**digits )
return result
if __name__ == "__main__":
print(f'''{solution() = }''')
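# Quick self-check sketch: _modexpt agrees with Python's built-in
# three-argument pow for modular exponentiation.
def _modexpt_demo() -> None:
    for base, exp, mod in [(2, 10, 1_000), (1_777, 1_855, 10**8)]:
        assert _modexpt(base, exp, mod) == pow(base, exp, mod)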
| 153 | 0 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : int ):
__UpperCAmelCase = tempfile.mkdtemp()
__UpperCAmelCase = BlipImageProcessor()
__UpperCAmelCase = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
__UpperCAmelCase = BlipaProcessor(_lowercase , _lowercase )
processor.save_pretrained(self.tmpdirname )
def a ( self : Dict , **_lowercase : Dict ):
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase ).tokenizer
def a ( self : int , **_lowercase : Union[str, Any] ):
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase ).image_processor
def a ( self : Union[str, Any] ):
shutil.rmtree(self.tmpdirname )
def a ( self : Any ):
__UpperCAmelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
__UpperCAmelCase = [Image.fromarray(np.moveaxis(_lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def a ( self : str ):
__UpperCAmelCase = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__UpperCAmelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__UpperCAmelCase = self.get_image_processor(do_normalize=_lowercase , padding_value=1.0 )
__UpperCAmelCase = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowercase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowercase )
def a ( self : Optional[int] ):
__UpperCAmelCase = self.get_image_processor()
__UpperCAmelCase = self.get_tokenizer()
__UpperCAmelCase = BlipaProcessor(tokenizer=_lowercase , image_processor=_lowercase )
__UpperCAmelCase = self.prepare_image_inputs()
__UpperCAmelCase = image_processor(_lowercase , return_tensors='''np''' )
__UpperCAmelCase = processor(images=_lowercase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def a ( self : List[str] ):
__UpperCAmelCase = self.get_image_processor()
__UpperCAmelCase = self.get_tokenizer()
__UpperCAmelCase = BlipaProcessor(tokenizer=_lowercase , image_processor=_lowercase )
__UpperCAmelCase = '''lower newer'''
__UpperCAmelCase = processor(text=_lowercase )
__UpperCAmelCase = tokenizer(_lowercase , return_token_type_ids=_lowercase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def a ( self : str ):
__UpperCAmelCase = self.get_image_processor()
__UpperCAmelCase = self.get_tokenizer()
__UpperCAmelCase = BlipaProcessor(tokenizer=_lowercase , image_processor=_lowercase )
__UpperCAmelCase = '''lower newer'''
__UpperCAmelCase = self.prepare_image_inputs()
__UpperCAmelCase = processor(text=_lowercase , images=_lowercase )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
# test if it raises when no input is passed
with pytest.raises(_lowercase ):
processor()
def a ( self : int ):
__UpperCAmelCase = self.get_image_processor()
__UpperCAmelCase = self.get_tokenizer()
__UpperCAmelCase = BlipaProcessor(tokenizer=_lowercase , image_processor=_lowercase )
__UpperCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__UpperCAmelCase = processor.batch_decode(_lowercase )
__UpperCAmelCase = tokenizer.batch_decode(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
def a ( self : Union[str, Any] ):
__UpperCAmelCase = self.get_image_processor()
__UpperCAmelCase = self.get_tokenizer()
__UpperCAmelCase = BlipaProcessor(tokenizer=_lowercase , image_processor=_lowercase )
__UpperCAmelCase = '''lower newer'''
__UpperCAmelCase = self.prepare_image_inputs()
__UpperCAmelCase = processor(text=_lowercase , images=_lowercase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
| 86 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 86 | 1 |
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False

    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    # Initialize path with -1, indicating that we have not visited the vertices yet
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
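# Example sketch on a small graph that contains a Hamiltonian cycle
# (adjacency matrix chosen for illustration).
def _hamilton_demo():
    graph = [
        [0, 1, 0, 1, 0],
        [1, 0, 1, 1, 1],
        [0, 1, 0, 0, 1],
        [1, 1, 0, 0, 1],
        [0, 1, 1, 1, 0],
    ]
    return hamilton_cycle(graph)  # -> [0, 1, 2, 4, 3, 0]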
| 137 |
import enum
import os
from hashlib import sha256
from typing import Optional

from .. import config
from .logging import get_logger


logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    """`Enum` that specifies which verification checks to run."""

    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksums verifications of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum don't match the expected checksum."""


def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)


class SplitsVerificationException(Exception):
    """Exceptions during splits verifications."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file is missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The splits sizes don't match the expected splits sizes."""


def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size):
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
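# Usage sketch: record the size and sha256 of a freshly written file
# (the temporary path exists only for the demonstration).
def _checksum_demo() -> dict:
    import tempfile

    with tempfile.NamedTemporaryFile(delete=False) as f:
        f.write(b"hello")
        path = f.name
    return get_size_checksum_dict(path)  # {"num_bytes": 5, "checksum": "..."}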
| 137 | 1 |
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class CLIPSegProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, visual_prompt=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
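# Usage sketch (assumes network access and the public CIDAS checkpoint;
# `image` and `prompt_image` would be PIL images supplied by the caller):
#
#   from transformers import CLIPSegProcessor
#   processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   text_inputs = processor(text=["a cat"], images=image, return_tensors="pt")
#   prompt_inputs = processor(images=image, visual_prompt=prompt_image, return_tensors="pt")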
| 354 |
from __future__ import annotations

from typing import Dict

from ...configuration_utils import PretrainedConfig


ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
    "susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}


class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250_002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
 | 8 | 0 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=lowerCAmelCase__ )
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : str =field(default="""audio-classification""" ,metadata={"""include_in_asdict_even_if_is_default""": True} )
__UpperCAmelCase : ClassVar[Features] =Features({"""audio""": Audio()} )
__UpperCAmelCase : ClassVar[Features] =Features({"""labels""": ClassLabel} )
__UpperCAmelCase : str ="audio"
__UpperCAmelCase : str ="labels"
def snake_case ( self , __a ):
if self.label_column not in features:
raise ValueError(f"Column {self.label_column} is not present in features." )
if not isinstance(features[self.label_column] , __a ):
raise ValueError(f"Column {self.label_column} is not a ClassLabel." )
__lowerCAmelCase = copy.deepcopy(self )
__lowerCAmelCase = self.label_schema.copy()
__lowerCAmelCase = features[self.label_column]
__lowerCAmelCase = label_schema
return task_template
@property
def snake_case ( self ):
return {
self.audio_column: "audio",
self.label_column: "labels",
}
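# Usage sketch: align the template with a dataset's features so the generic
# "labels" ClassLabel is replaced by the dataset's own label set (the label
# names are illustrative).
def _template_demo():
    features = Features({"audio": Audio(), "labels": ClassLabel(names=["dog", "cat"])})
    template = AudioClassification(audio_column="audio", label_column="labels")
    return template.align_with_features(features)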
| 57 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
A : Any = "examples/"
A : Optional[Any] = {
"examples": (re.compile(R"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(R"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(R"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), R"\1version=\"VERSION\","),
"doc": (re.compile(R"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
A : Optional[int] = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
A : List[Any] = "README.md"
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
with open(_UpperCamelCase , "r" , encoding="utf-8" , newline="\n" ) as f:
__lowerCAmelCase = f.read()
__lowerCAmelCase , __lowerCAmelCase = REPLACE_PATTERNS[pattern]
__lowerCAmelCase = replace.replace("VERSION" , _UpperCamelCase )
__lowerCAmelCase = re_pattern.sub(_UpperCamelCase , _UpperCamelCase )
with open(_UpperCamelCase , "w" , encoding="utf-8" , newline="\n" ) as f:
f.write(_UpperCamelCase )
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
for folder, directories, fnames in os.walk(_UpperCamelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("research_projects" )
if "legacy" in directories:
directories.remove("legacy" )
for fname in fnames:
if fname.endswith(".py" ):
update_version_in_file(os.path.join(_UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase , pattern="examples" )
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase=False ):
'''simple docstring'''
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if not patch:
update_version_in_examples(_UpperCamelCase )
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = "🤗 Transformers currently provides the following architectures"
__lowerCAmelCase = "1. Want to contribute a new model?"
with open(_UpperCamelCase , "r" , encoding="utf-8" , newline="\n" ) as f:
__lowerCAmelCase = f.readlines()
# Find the start of the list.
__lowerCAmelCase = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
__lowerCAmelCase = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("1." ):
__lowerCAmelCase = lines[index].replace(
"https://huggingface.co/docs/transformers/main/model_doc" , "https://huggingface.co/docs/transformers/model_doc" , )
index += 1
with open(_UpperCamelCase , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(_UpperCamelCase )
def _lowerCamelCase ( ):
'''simple docstring'''
with open(REPLACE_FILES["init"] , "r" ) as f:
__lowerCAmelCase = f.read()
__lowerCAmelCase = REPLACE_PATTERNS["init"][0].search(_UpperCamelCase ).groups()[0]
return packaging.version.parse(_UpperCamelCase )
def _lowerCamelCase ( _UpperCamelCase=False ):
'''simple docstring'''
__lowerCAmelCase = get_version()
if patch and default_version.is_devrelease:
raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" )
if default_version.is_devrelease:
__lowerCAmelCase = default_version.base_version
elif patch:
__lowerCAmelCase = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
else:
__lowerCAmelCase = f"{default_version.major}.{default_version.minor + 1}.0"
# Now let's ask nicely if that's the right one.
__lowerCAmelCase = input(f"Which version are you releasing? [{default_version}]" )
if len(_UpperCamelCase ) == 0:
__lowerCAmelCase = default_version
print(f"Updating version to {version}." )
global_version_update(_UpperCamelCase , patch=_UpperCamelCase )
if not patch:
print("Cleaning main README, don't forget to run `make fix-copies`." )
clean_main_ref_in_model_list()
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = get_version()
__lowerCAmelCase = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
__lowerCAmelCase = current_version.base_version
# Check with the user we got that right.
__lowerCAmelCase = input(f"Which version are we developing now? [{dev_version}]" )
if len(_UpperCamelCase ) == 0:
__lowerCAmelCase = dev_version
print(f"Updating version to {version}." )
global_version_update(_UpperCamelCase )
print("Cleaning main README, don't forget to run `make fix-copies`." )
clean_main_ref_in_model_list()
if __name__ == "__main__":
A : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
A : Dict = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
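# Typical invocations (the utils/release.py path is an assumption; the flags
# are the ones defined above):
#
#   python utils/release.py                 # prepare a minor release
#   python utils/release.py --patch         # prepare a patch release
#   python utils/release.py --post_release  # bump back to a .dev0 version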
| 57 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
_lowercase : str = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
_lowercase : List[str] = 2_5_0_0_0_4
_lowercase : str = 2_5_0_0_2_0
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( lowerCamelCase_ , unittest.TestCase ):
lowerCAmelCase_ = MBartTokenizer
lowerCAmelCase_ = MBartTokenizerFast
lowerCAmelCase_ = True
lowerCAmelCase_ = True
def _snake_case ( self ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowercase_ : Any = MBartTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(self.tmpdirname )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Tuple = MBartTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE )
lowercase_ : Any = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__SCREAMING_SNAKE_CASE , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
lowercase_ : Optional[int] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
lowercase_ : int = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
lowercase_ : Tuple = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def _snake_case ( self ):
"""simple docstring"""
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowercase_ : int = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase_ : str = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
lowercase_ : str = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
lowercase_ : int = tempfile.mkdtemp()
lowercase_ : Union[str, Any] = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE )
lowercase_ : int = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
lowercase_ : Union[str, Any] = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Checks everything loads correctly in the same way
lowercase_ : Dict = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE )
lowercase_ : int = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__SCREAMING_SNAKE_CASE )
# Save tokenizer rust, legacy_format=True
lowercase_ : str = tempfile.mkdtemp()
lowercase_ : Optional[Any] = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE , legacy_format=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE )
# Checks it save with the same files
self.assertSequenceEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Checks everything loads correctly in the same way
lowercase_ : Optional[Any] = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE )
lowercase_ : str = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
shutil.rmtree(__SCREAMING_SNAKE_CASE )
# Save tokenizer rust, legacy_format=False
lowercase_ : List[Any] = tempfile.mkdtemp()
lowercase_ : Optional[Any] = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE , legacy_format=__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowercase_ : Dict = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE )
lowercase_ : str = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
shutil.rmtree(__SCREAMING_SNAKE_CASE )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( unittest.TestCase ):
lowerCAmelCase_ = '''facebook/mbart-large-en-ro'''
lowerCAmelCase_ = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
lowerCAmelCase_ = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
lowerCAmelCase_ = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE]
@classmethod
def _snake_case ( cls ):
"""simple docstring"""
lowercase_ : MBartTokenizer = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
lowercase_ : str = 1
return cls
def _snake_case ( self ):
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 25_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 25_00_04 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 25_00_20 )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : List[str] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
self.assertIn(__SCREAMING_SNAKE_CASE , self.tokenizer.all_special_ids )
lowercase_ : Union[str, Any] = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
lowercase_ : Optional[int] = self.tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertNotIn(self.tokenizer.eos_token , __SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
src_text = ['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0] , str )
desired_max_length = 10
ids = self.tokenizer(src_text , max_length=desired_max_length , truncation=True ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , EN_CODE )
self.assertEqual(len(ids ) , desired_max_length )
def _snake_case ( self ):
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [25_00_26, 25_00_01] )
def _snake_case ( self ):
"""simple docstring"""
tmpdirname = tempfile.mkdtemp()
original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(tmpdirname )
new_tok = MBartTokenizer.from_pretrained(tmpdirname )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , original_special_tokens )
@require_torch
def _snake_case ( self ):
"""simple docstring"""
batch = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=True , return_tensors='''pt''' )
batch["decoder_input_ids"] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def _snake_case ( self ):
"""simple docstring"""
batch = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=True , truncation=True , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
batch["decoder_input_ids"] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(batch , BatchEncoding )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
result = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , result )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def _snake_case ( self ):
"""simple docstring"""
batch = self.tokenizer(self.src_text , padding=True , truncation=True , max_length=3 , return_tensors='''pt''' )
targets = self.tokenizer(
text_target=self.tgt_text , padding=True , truncation=True , max_length=10 , return_tensors='''pt''' )
labels = targets['''input_ids''']
batch["decoder_input_ids"] = shift_tokens_right(labels , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def _snake_case ( self ):
"""simple docstring"""
inputs = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(inputs ) , {
# A, test, EOS, en_XX
'''input_ids''': [[62, 30_34, 2, 25_00_04]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 25_00_01,
} , )
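# Worked example (sketch) of the `shift_tokens_right` behaviour the MBart
# assertions above rely on: labels end with [..., eos(=2), tgt_lang_code] and
# the shift wraps the language code around to position 0, e.g.
#   labels:            [a, b, c, 2, RO_CODE]
#   decoder_input_ids: [RO_CODE, a, b, c, 2]
# which is what the `decoder_input_ids[1][0] == RO_CODE` and
# `decoder_input_ids[1][-1] == 2` checks verify.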
| 356 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase : List[Any] = logging.get_logger(__name__)
_lowercase : Optional[Any] = {
"google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class lowerCAmelCase__ ( PretrainedConfig ):
model_type = '''vit'''
def __init__( self , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , image_size=2_24 , patch_size=16 , num_channels=3 , qkv_bias=True , encoder_stride=16 , **kwargs , ):
"""simple docstring"""
super().__init__(**kwargs )
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.qkv_bias = qkv_bias
self.encoder_stride = encoder_stride
class lowerCAmelCase__ ( OnnxConfig ):
torch_onnx_minimum_version = version.parse('''1.11''' )
@property
def _snake_case ( self ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _snake_case ( self ):
"""simple docstring"""
return 1E-4
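# Shape sanity check for the defaults above: with image_size=224 and patch_size=16
# the encoder sees (224 // 16) ** 2 = 196 patch tokens plus one [CLS] token,
# i.e. a sequence length of 197 at hidden_size 768.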
| 264 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ : List[str] = logging.get_logger(__name__)
lowerCamelCase__ : Dict = {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
"bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
"bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
"cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
"cl-tohoku/bert-base-japanese-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
),
"wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class lowerCamelCase_ ( PretrainedConfig ):
'''simple docstring'''
model_type = "bert"
def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
super().__init__(pad_token_id=pad_token_id , **kwargs )
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.use_cache = use_cache
self.classifier_dropout = classifier_dropout
class lowerCamelCase_ ( OnnxConfig ):
'''simple docstring'''
@property
def lowerCAmelCase_ ( self : int ):
if self.task == "multiple-choice":
dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
| 225 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class lowerCamelCase (datasets.BuilderConfig ):
"""simple docstring"""
features = None
encoding = "utf-8"
encoding_errors = None
field = None
use_threads = True # deprecated
block_size = None # deprecated
chunksize = 1_0 << 2_0 # 10MB
newlines_in_values = None
class lowerCamelCase (datasets.ArrowBasedBuilder ):
"""simple docstring"""
BUILDER_CONFIG_CLASS = JsonConfig
def __A ( self ) -> Optional[int]:
if self.config.block_size is not None:
logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead" )
self.config.chunksize = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
"The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore." )
if self.config.newlines_in_values is not None:
raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported" )
return datasets.DatasetInfo(features=self.config.features )
def __A ( self , dl_manager ) -> Tuple:
if not self.config.data_files:
raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
data_files = dl_manager.download_and_extract(self.config.data_files )
if isinstance(data_files , (str, list, tuple) ):
files = data_files
if isinstance(files , str ):
files = [files]
files = [dl_manager.iter_files(file ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
splits = []
for split_name, files in data_files.items():
if isinstance(files , str ):
files = [files]
files = [dl_manager.iter_files(file ) for file in files]
splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"files": files} ) )
return splits
def __A ( self , pa_table: pa.Table ) -> pa.Table:
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
type = self.config.features.arrow_schema.field(column_name ).type
pa_table = pa_table.append_column(column_name , pa.array([None] * len(pa_table ) , type=type ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table , self.config.features.arrow_schema )
return pa_table
def __A ( self , files ) -> int:
for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(file , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
dataset = json.load(f )
# We keep only the field we are interested in
dataset = dataset[self.config.field]
# We accept two formats: a list of dicts or a dict of lists
if isinstance(dataset , (list, tuple) ):
keys = set().union(*[row.keys() for row in dataset] )
mapping = {col: [row.get(col ) for row in dataset] for col in keys}
else:
mapping = dataset
pa_table = pa.Table.from_pydict(mapping )
yield file_idx, self._cast_table(pa_table )
# If the file has one json object per line
else:
with open(file , "rb" ) as f:
batch_idx = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
block_size = max(self.config.chunksize // 32 , 16 << 10 )
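# e.g. with the default 10 MB chunksize: (10 << 20) // 32 = 327_680 bytes (~320 kB),
# well above the 16 kB floor; the floor only matters for chunksizes below
# 32 * 16 kB = 512 kB.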
encoding_errors = (
self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
)
while True:
batch = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(f )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
batch = batch.decode(self.config.encoding , errors=encoding_errors ).encode("utf-8" )
try:
while True:
try:
pa_table = paj.read_json(
io.BytesIO(batch ) , read_options=paj.ReadOptions(block_size=block_size ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(e , pa.ArrowInvalid )
and "straddling" not in str(e )
or block_size > len(batch )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F'''Batch of {len(batch )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.''' )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
file , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
dataset = json.load(f )
except json.JSONDecodeError:
logger.error(F'''Failed to read file \'{file}\' with error {type(e )}: {e}''' )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(dataset , list ):  # list is the only sequence type supported in JSON
try:
keys = set().union(*[row.keys() for row in dataset] )
mapping = {col: [row.get(col ) for row in dataset] for col in keys}
pa_table = pa.Table.from_pydict(mapping )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F'''Failed to read file \'{file}\' with error {type(e )}: {e}''' )
raise ValueError(F'''Not able to read records in the JSON file at {file}.''' ) from None
yield file_idx, self._cast_table(pa_table )
break
else:
logger.error(F'''Failed to read file \'{file}\' with error {type(e )}: {e}''' )
raise ValueError(
F'''Not able to read records in the JSON file at {file}. '''
F'''You should probably indicate the field of the JSON file containing your records. '''
F'''This JSON file contains the following fields: {str(list(dataset.keys() ) )}. '''
F'''Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ''' ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(pa_table )
batch_idx += 1
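# Usage sketch (this builder backs `load_dataset("json", ...)`; the file name
# below is illustrative only):
#
#   from datasets import load_dataset
#   ds = load_dataset("json", data_files="data.json", field="records")
#
# `field="records"` selects one key of a single top-level JSON object, matching
# the `self.config.field` branch above; without it, the file is parsed as
# JSON Lines in `chunksize`-sized batches.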
| 118 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class snake_case_ ( PretrainedConfig ):
model_type = "rwkv"
attribute_map = {"max_position_embeddings": "context_length"}
def __init__( self , vocab_size=5_02_77 , context_length=10_24 , hidden_size=40_96 , num_hidden_layers=32 , attention_hidden_size=None , intermediate_size=None , layer_norm_epsilon=1E-5 , bos_token_id=0 , eos_token_id=0 , rescale_every=6 , tie_word_embeddings=False , use_cache=True , **kwargs , ) -> None:
self.vocab_size = vocab_size
self.context_length = context_length
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
self.layer_norm_epsilon = layer_norm_epsilon
self.rescale_every = rescale_every
self.use_cache = use_cache
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
super().__init__(
tie_word_embeddings=tie_word_embeddings , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
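# Minimal usage sketch (assumption: run inside this snippet; `snake_case_` is the
# obfuscated class name kept from the dump) showing the derived defaults above:
config = snake_case_()
assert config.attention_hidden_size == config.hidden_size # falls back to hidden_size
assert config.intermediate_size == 4 * config.hidden_size # falls back to 4 * hidden_size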
| 333 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class snake_case_ ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
pipeline_class = StableUnCLIPPipeline
params = TEXT_TO_IMAGE_PARAMS
batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
test_xformers_attention = False
def __UpperCamelCase ( self ) -> List[str]:
embedder_hidden_size = 32
embedder_projection_dim = embedder_hidden_size
# prior components
torch.manual_seed(0 )
prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
prior_text_encoder = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=embedder_hidden_size , projection_dim=embedder_projection_dim , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
prior = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=embedder_projection_dim , num_layers=1 , )
torch.manual_seed(0 )
prior_scheduler = DDPMScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=10_00 , clip_sample=True , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
# regular denoising components
torch.manual_seed(0 )
image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_projection_dim )
image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
text_encoder = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=embedder_hidden_size , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
unet = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=embedder_hidden_size , layers_per_block=1 , upcast_attention=True , use_linear_projection=True , )
torch.manual_seed(0 )
scheduler = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type="v_prediction" , set_alpha_to_one=False , steps_offset=1 , )
torch.manual_seed(0 )
vae = AutoencoderKL()
components = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
def __UpperCamelCase ( self , device , seed=0 ) -> Any:
if str(device ).startswith("mps" ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
lowercase__ : Union[str, Any] = torch_device == "cpu"
self._test_attention_slicing_forward_pass(test_max_difference=lowercase_ )
def __UpperCamelCase ( self : List[Any] ) -> List[str]:
lowercase__ : str = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=lowercase_ )
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : Tuple ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : int ) -> int:
expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
generator = torch.Generator(device="cpu" ).manual_seed(0 )
output = pipe("anime turtle" , generator=generator , output_type="np" )
image = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(image , expected_image )
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
pipe = pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_ = pipe(
"anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
mem_bytes = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 333 | 1 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class A__ ( SchedulerCommonTest ):
scheduler_classes = (DEISMultistepScheduler,)
forward_default_kwargs = (('num_inference_steps', 25),)
def A ( self : Optional[int] , **_a : str ) -> str:
'''simple docstring'''
config = {
'num_train_timesteps': 1000,
'beta_start': 0.00_01,
'beta_end': 0.02,
'beta_schedule': 'linear',
'solver_order': 2,
}
config.update(**_a )
return config
def A ( self , time_step=0 , **config ) -> Tuple:
'''simple docstring'''
kwargs = dict(self.forward_default_kwargs )
num_inference_steps = kwargs.pop('num_inference_steps' , None )
sample = self.dummy_sample
residual = 0.1 * sample
dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config(**config )
scheduler = scheduler_class(**scheduler_config )
scheduler.set_timesteps(num_inference_steps )
# copy over dummy past residuals
scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname )
new_scheduler = scheduler_class.from_pretrained(tmpdirname )
new_scheduler.set_timesteps(num_inference_steps )
# copy over dummy past residuals
new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
output, new_output = sample, sample
for t in range(time_step , time_step + scheduler.config.solver_order + 1 ):
output = scheduler.step(output , t , sample , **kwargs ).prev_sample
new_output = new_scheduler.step(new_output , t , sample , **kwargs ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def A ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
pass
def A ( self , time_step=0 , **forward_kwargs ) -> List[str]:
'''simple docstring'''
kwargs = dict(self.forward_default_kwargs )
num_inference_steps = kwargs.pop('num_inference_steps' , None )
sample = self.dummy_sample
residual = 0.1 * sample
dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
scheduler.set_timesteps(num_inference_steps )
# copy over dummy past residuals (must be after setting timesteps)
scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname )
new_scheduler = scheduler_class.from_pretrained(tmpdirname )
# copy over dummy past residuals
new_scheduler.set_timesteps(num_inference_steps )
# copy over dummy past residual (must be after setting timesteps)
new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def A ( self , scheduler=None , **config ) -> Optional[int]:
'''simple docstring'''
if scheduler is None:
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(**config )
scheduler = scheduler_class(**scheduler_config )
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(**config )
scheduler = scheduler_class(**scheduler_config )
num_inference_steps = 10
model = self.dummy_model()
sample = self.dummy_sample_deter
scheduler.set_timesteps(num_inference_steps )
for i, t in enumerate(scheduler.timesteps ):
residual = model(sample , t )
sample = scheduler.step(residual , t , sample ).prev_sample
return sample
def A ( self ) -> Dict:
'''simple docstring'''
kwargs = dict(self.forward_default_kwargs )
num_inference_steps = kwargs.pop('num_inference_steps' , None )
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
sample = self.dummy_sample
residual = 0.1 * sample
if num_inference_steps is not None and hasattr(scheduler , 'set_timesteps' ):
scheduler.set_timesteps(num_inference_steps )
elif num_inference_steps is not None and not hasattr(scheduler , 'set_timesteps' ):
scheduler.num_inference_steps = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
time_step_0 = scheduler.timesteps[5]
time_step_1 = scheduler.timesteps[6]
output_0 = scheduler.step(residual , time_step_0 , sample , **kwargs ).prev_sample
output_1 = scheduler.step(residual , time_step_1 , sample , **kwargs ).prev_sample
self.assertEqual(output_0.shape , sample.shape )
self.assertEqual(output_0.shape , output_1.shape )
def A ( self ) -> str:
'''simple docstring'''
scheduler = DEISMultistepScheduler(**self.get_scheduler_config() )
sample = self.full_loop(scheduler=scheduler )
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 0.2_39_16 ) < 1e-3
scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config )
scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config )
scheduler = UniPCMultistepScheduler.from_config(scheduler.config )
scheduler = DEISMultistepScheduler.from_config(scheduler.config )
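# These `from_config` round-trips work because the DPM-family schedulers in
# diffusers share compatible config fields, so one scheduler class can be
# swapped for another from the same config without retraining.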
sample = self.full_loop(scheduler=scheduler )
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 0.2_39_16 ) < 1e-3
def A ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=timesteps )
def A ( self ) -> Dict:
'''simple docstring'''
self.check_over_configs(thresholding=False )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , algorithm_type='deis' , solver_order=order , solver_type=solver_type , )
def A ( self : int ) -> List[Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=prediction_type )
def A ( self ) -> Tuple:
'''simple docstring'''
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=order , solver_type=solver_type , prediction_type=prediction_type , algorithm_type=algorithm_type , )
sample = self.full_loop(
solver_order=order , solver_type=solver_type , prediction_type=prediction_type , algorithm_type=algorithm_type , )
assert not torch.isnan(sample ).any(), "Samples have nan numbers"
def A ( self : Any ) -> str:
'''simple docstring'''
self.check_over_configs(lower_order_final=True )
self.check_over_configs(lower_order_final=False )
def A ( self : List[str] ) -> List[Any]:
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=num_inference_steps , time_step=0 )
def A ( self : List[Any] ) -> Dict:
'''simple docstring'''
sample = self.full_loop()
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 0.2_39_16 ) < 1e-3
def A ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
sample = self.full_loop(prediction_type='v_prediction' )
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 0.0_91 ) < 1e-3
def A ( self ) -> Dict:
'''simple docstring'''
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(thresholding=True , dynamic_thresholding_ratio=0 )
scheduler = scheduler_class(**scheduler_config )
num_inference_steps = 10
model = self.dummy_model()
sample = self.dummy_sample_deter.half()
scheduler.set_timesteps(num_inference_steps )
for i, t in enumerate(scheduler.timesteps ):
residual = model(sample , t )
sample = scheduler.step(residual , t , sample ).prev_sample
assert sample.dtype == torch.floataa
| 47 |
'''simple docstring'''
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
lowerCamelCase : List[Any] = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
lowerCamelCase : Any = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
lowerCamelCase : Optional[Any] = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
lowerCamelCase : Optional[Any] = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A__ ( datasets.Metric ):
def A ( self : Tuple ) -> str:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[
'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
'https://en.wikipedia.org/wiki/METEOR',
] , )
def A ( self : Union[str, Any] , _a : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
import nltk
nltk.download('wordnet' )
if NLTK_VERSION >= version.Version('3.6.5' ):
nltk.download('punkt' )
if NLTK_VERSION >= version.Version('3.6.6' ):
nltk.download('omw-1.4' )
def A ( self , predictions , references , alpha=0.9 , beta=3 , gamma=0.5 ) -> Optional[int]:
'''simple docstring'''
if NLTK_VERSION >= version.Version('3.6.5' ):
scores = [
meteor_score.single_meteor_score(
word_tokenize(ref ) , word_tokenize(pred ) , alpha=alpha , beta=beta , gamma=gamma )
for ref, pred in zip(references , predictions )
]
else:
scores = [
meteor_score.single_meteor_score(ref , pred , alpha=alpha , beta=beta , gamma=gamma )
for ref, pred in zip(references , predictions )
]
return {"meteor": np.mean(scores )}
| 47 | 1 |
"""simple docstring"""
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class _lowerCamelCase ( ModelTesterMixin , unittest.TestCase ):
model_class = PriorTransformer
main_input_name = "hidden_states"
@property
def _lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
batch_size = 4
embedding_dim = 8
num_embeddings = 7
hidden_states = floats_tensor((batch_size, embedding_dim) ).to(torch_device )
proj_embedding = floats_tensor((batch_size, embedding_dim) ).to(torch_device )
encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(torch_device )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def _lowerCAmelCase ( self , seed=0 ) -> Tuple:
"""simple docstring"""
torch.manual_seed(seed )
batch_size = 4
embedding_dim = 8
num_embeddings = 7
hidden_states = torch.randn((batch_size, embedding_dim) ).to(torch_device )
proj_embedding = torch.randn((batch_size, embedding_dim) ).to(torch_device )
encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(torch_device )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def _lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
return (4, 8)
@property
def _lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
return (4, 8)
def _lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
init_dict = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 4,
"""num_layers""": 2,
"""embedding_dim""": 8,
"""num_embeddings""": 7,
"""additional_embeddings""": 4,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def _lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
model, loading_info = PriorTransformer.from_pretrained(
"""hf-internal-testing/prior-dummy""" , output_loading_info=True )
self.assertIsNotNone(model )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(torch_device )
hidden_states = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def _lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
model = self.model_class(**init_dict )
signature = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["""hidden_states""", """timestep"""]
self.assertListEqual(arg_names[:2] , expected_arg_names )
def _lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : Any = PriorTransformer.from_pretrained("""hf-internal-testing/prior-dummy""" )
lowerCAmelCase__ : Union[str, Any] = model.to(UpperCamelCase )
if hasattr(UpperCamelCase , """set_default_attn_processor""" ):
model.set_default_attn_processor()
lowerCAmelCase__ : str = self.get_dummy_seed_input()
with torch.no_grad():
lowerCAmelCase__ : Union[str, Any] = model(**UpperCamelCase )[0]
lowerCAmelCase__ : Any = output[0, :5].flatten().cpu()
print(UpperCamelCase )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
lowerCAmelCase__ : List[str] = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239] )
self.assertTrue(torch_all_close(UpperCamelCase , UpperCamelCase , rtol=1E-2 ) )
@slow
class _lowerCamelCase ( unittest.TestCase ):
def _lowerCAmelCase ( self , batch_size=1 , embedding_dim=7_68 , num_embeddings=77 , seed=0 ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(seed )
hidden_states = torch.randn((batch_size, embedding_dim) ).to(torch_device )
proj_embedding = torch.randn((batch_size, embedding_dim) ).to(torch_device )
encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(torch_device )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def _lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
[37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
# fmt: on
] )
def _lowerCAmelCase ( self , seed , expected_slice ) -> Dict:
"""simple docstring"""
model = PriorTransformer.from_pretrained("""kandinsky-community/kandinsky-2-1-prior""" , subfolder="""prior""" )
model.to(torch_device )
input = self.get_dummy_seed_input(seed=seed )
with torch.no_grad():
sample = model(**input )[0]
assert list(sample.shape ) == [1, 7_68]
output_slice = sample[0, :8].flatten().cpu()
print(output_slice )
expected_output_slice = torch.tensor(expected_slice )
assert torch_all_close(output_slice , expected_output_slice , atol=1E-3 )
| 361 |
"""simple docstring"""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class _lowerCamelCase ( unittest.TestCase ):
def _lowerCAmelCase ( self : Dict ) -> None:
"""simple docstring"""
x = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
_ = Vector()
def _lowerCAmelCase ( self : Union[str, Any] ) -> None:
"""simple docstring"""
x = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(x ) , """(0,0,0,0,0,1)""" )
def _lowerCAmelCase ( self : Any ) -> None:
"""simple docstring"""
x = Vector([1, 2, 3, 4] )
self.assertEqual(len(x ) , 4 )
def _lowerCAmelCase ( self : List[str] ) -> None:
"""simple docstring"""
x = Vector([1, 2] )
y = Vector([1, 2, 3, 4, 5] )
z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
w = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 )
def _lowerCAmelCase ( self : Any ) -> None:
"""simple docstring"""
x = Vector([1, 2, 3] )
y = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def _lowerCAmelCase ( self : Optional[Any] ) -> None:
"""simple docstring"""
x = Vector([1, 2, 3] )
y = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def _lowerCAmelCase ( self : Optional[Any] ) -> None:
"""simple docstring"""
x = Vector([1, 2, 3] )
a = Vector([2, -1, 4] ) # for test of dot product
b = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , """(3.0,6.0,9.0)""" )
self.assertEqual((a * b) , 0 )
def _lowerCAmelCase ( self : int ) -> None:
"""simple docstring"""
self.assertEqual(str(zero_vector(10 ) ).count("""0""" ) , 10 )
def _lowerCAmelCase ( self : Tuple ) -> None:
"""simple docstring"""
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , """(0,1,0)""" )
def _lowerCAmelCase ( self : Optional[Any] ) -> None:
"""simple docstring"""
x = Vector([1, 2, 3] )
y = Vector([1, 0, 1] )
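# axpy(a, x, y) computes a * x + y, here 2 * (1, 2, 3) + (1, 0, 1) = (3, 4, 7).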
self.assertEqual(str(axpy(2 , x , y ) ) , """(3,4,7)""" )
def _lowerCAmelCase ( self : Optional[int] ) -> None:
"""simple docstring"""
x = Vector([1, 0, 0, 0, 0, 0] )
y = x.copy()
self.assertEqual(str(x ) , str(y ) )
def _lowerCAmelCase ( self : Optional[Any] ) -> None:
"""simple docstring"""
x = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(UpperCamelCase ) , """(0,1,0)""" )
def _lowerCAmelCase ( self : int ) -> None:
"""simple docstring"""
a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""" , str(a ) )
def _lowerCAmelCase ( self : Union[str, Any] ) -> None:
"""simple docstring"""
a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(x , y ) )
def _lowerCAmelCase ( self : List[Any] ) -> None:
"""simple docstring"""
a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(x , y ) )
def _lowerCAmelCase ( self : int ) -> None:
"""simple docstring"""
a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
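# Cofactor expansion along the first row:
# 1*(4*8 - 5*7) - 2*(2*8 - 5*6) + 3*(2*7 - 4*6) = -3 + 28 - 30 = -5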
self.assertEqual(-5 , a.determinant() )
def _lowerCAmelCase ( self : int ) -> None:
"""simple docstring"""
a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
x = Vector([1, 2, 3] )
self.assertEqual("""(14,32,50)""" , str(a * x ) )
self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""" , str(a * 2 ) )
def _lowerCAmelCase ( self : str ) -> None:
"""simple docstring"""
a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""" , str(UpperCamelCase ) )
def _lowerCAmelCase ( self : Tuple ) -> None:
"""simple docstring"""
a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertAlmostEqual(7 , a.component(2 , 1 ) , delta=0.01 )
def _lowerCAmelCase ( self : Any ) -> None:
"""simple docstring"""
a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""" , str(a + b ) )
def _lowerCAmelCase ( self : List[Any] ) -> None:
"""simple docstring"""
a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""" , str(a - b ) )
def _lowerCAmelCase ( self : Union[str, Any] ) -> None:
"""simple docstring"""
self.assertEqual(
"""|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""" , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
| 212 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''squeezebert/squeezebert-uncased''': (
'''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'''
),
'''squeezebert/squeezebert-mnli''': '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt''',
'''squeezebert/squeezebert-mnli-headless''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''squeezebert/squeezebert-uncased''': (
'''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'''
),
'''squeezebert/squeezebert-mnli''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'''
),
'''squeezebert/squeezebert-mnli-headless''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''squeezebert/squeezebert-uncased''': 5_12,
'''squeezebert/squeezebert-mnli''': 5_12,
'''squeezebert/squeezebert-mnli-headless''': 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
'''squeezebert/squeezebert-uncased''': {'''do_lower_case''': True},
'''squeezebert/squeezebert-mnli''': {'''do_lower_case''': True},
'''squeezebert/squeezebert-mnli-headless''': {'''do_lower_case''': True},
}
class _A( PreTrainedTokenizerFast ):
"""simple docstring"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
slow_tokenizer_class = SqueezeBertTokenizer
def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
super().__init__(
vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , do_lower_case ) != do_lower_case
or normalizer_state.get('strip_accents' , strip_accents ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , tokenize_chinese_chars ) != tokenize_chinese_chars
):
normalizer_class = getattr(normalizers , normalizer_state.pop('type' ) )
normalizer_state['lowercase'] = do_lower_case
normalizer_state['strip_accents'] = strip_accents
normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
self.do_lower_case = do_lower_case
def UpperCAmelCase_ ( self , token_ids_a , token_ids_b=None ):
output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_b:
output += token_ids_b + [self.sep_token_id]
return output
def UpperCAmelCase_ ( self , token_ids_a , token_ids_b = None ):
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_b is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
def UpperCAmelCase_ ( self , save_directory , filename_prefix = None ):
files = self._tokenizer.model.save(save_directory , name=filename_prefix )
return tuple(files )
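# The two token-building methods above produce the standard BERT-style layout:
#   single:  [CLS] A [SEP]            -> token_type_ids all 0
#   pair:    [CLS] A [SEP] B [SEP]    -> 0s over "[CLS] A [SEP]", 1s over "B [SEP]"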
| 280 |
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
UpperCAmelCase : List[Any] = logging.get_logger(__name__)
UpperCAmelCase : Optional[Any] = '''The Nymphenburg Palace is a beautiful palace in Munich!'''
def _SCREAMING_SNAKE_CASE ( bort_checkpoint_path , pytorch_dump_folder_path ) -> Optional[Any]:
    bort_4_8_768_1024_hparams = {
'attention_cell': 'multi_head',
'num_layers': 4,
'units': 10_24,
'hidden_size': 7_68,
'max_length': 5_12,
'num_heads': 8,
'scaled': True,
'dropout': 0.1,
'use_residual': True,
'embed_size': 10_24,
'embed_dropout': 0.1,
'word_embed': None,
'layer_norm_eps': 1e-5,
'token_type_vocab_size': 2,
}
predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
encoder = BERTEncoder(
attention_cell=predefined_args['attention_cell'] , num_layers=predefined_args['num_layers'] , units=predefined_args['units'] , hidden_size=predefined_args['hidden_size'] , max_length=predefined_args['max_length'] , num_heads=predefined_args['num_heads'] , scaled=predefined_args['scaled'] , dropout=predefined_args['dropout'] , output_attention=False , output_all_encodings=False , use_residual=predefined_args['use_residual'] , activation=predefined_args.get('activation' , 'gelu' ) , layer_norm_eps=predefined_args.get('layer_norm_eps' , None ) , )
    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"
    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)
    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )
    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()
    # Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
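    # Note: Gluon's LayerNorm parameters are called `beta`/`gamma` where PyTorch uses
    # `bias`/`weight`; the table above encodes that correspondence for every layer, and
    # `check_and_map_params` below enforces it shape-by-shape.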
    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape
        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"
        return gluon_param

    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight")
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight")
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta")
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma")
    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data)
    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias")
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight")
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias")
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight")
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias")
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight")

        # self attention output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias")
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight")
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta")
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma")

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias")
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight")

        # output
        bert_output: BertOutput = layer.output
        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias")
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight")
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta")
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma")
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both models output the same tensors")
    else:
        print("❌ Both models do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
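
# Example invocation (the script name matches the Transformers repo convention;
# the paths are hypothetical):
#   python convert_bort_original_gluonnlp_checkpoint_to_pytorch.py \
#       --bort_checkpoint_path ./bort.params \
#       --pytorch_dump_folder_path ./bort-pytorch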
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, Swinv2Config, Swinv2ForImageClassification
def get_swinv2_config(swinv2_name):
    config = Swinv2Config()
    name_split = swinv2_name.split("_")

    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[3])

    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[2][6:])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "to" in swinv2_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)

    if ("22k" in swinv2_name) and ("to" not in swinv2_name):
        num_classes = 21841
        repo_id = "huggingface/label-files"
        filename = "imagenet-22k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinv2.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
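
# timm stores query/key/value as one fused `qkv` matrix of shape (3*dim, dim);
# the slices above peel it back into the three separate projections that the
# Transformers attention module expects.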
def convert_swinv2_checkpoint(swinv2_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swinv2_name, pretrained=True)
    timm_model.eval()

    config = get_swinv2_config(swinv2_name)
    model = Swinv2ForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinv2_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swinv2_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinv2_name),
        organization="nandwalritik",
        commit_message="Add model",
    )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swinv2_name',
default='swinv2_tiny_patch4_window8_256',
type=str,
help='Name of the Swinv2 timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
    convert_swinv2_checkpoint(args.swinv2_name, args.pytorch_dump_folder_path)
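
# After conversion one could sanity-check the published checkpoint (repo name is
# an assumption based on the `replace("_", "-")` pattern above):
#   from transformers import Swinv2ForImageClassification
#   model = Swinv2ForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")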
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
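
# With the hooks above in place, test reports can be produced via the shared
# diffusers option, e.g. (the report id is arbitrary):
#   python -m pytest tests/ --make-reports=local_run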
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """Find the value d up to `digit` for which 1/d has the longest recurring
    decimal cycle (Project Euler problem 26)."""
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number

    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
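
# Quick sanity check, verifiable by hand: among d up to 10, the fraction 1/7 has
# the longest recurring cycle (0.(142857), 6 digits), so solution(1, 10) == 7.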
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        """Constructs XLNetConfig."""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})")
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit.")
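
# Minimal usage sketch (all values shown are the defaults from __init__ above):
#   config = XLNetConfig(vocab_size=32000, d_model=1024, n_layer=24, n_head=16)
#   assert config.d_head == 64  # derived as d_model // n_head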
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
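
# The empty subclass keeps old imports working: constructing YolosFeatureExtractor
# still yields a fully functional YolosImageProcessor, it just emits a FutureWarning
# first.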
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
"BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BeitForImageClassification",
"BeitForMaskedImageModeling",
"BeitForSemanticSegmentation",
"BeitModel",
"BeitPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
"FlaxBeitForImageClassification",
"FlaxBeitForMaskedImageModeling",
"FlaxBeitModel",
"FlaxBeitPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
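
# Thanks to the _LazyModule indirection above, `import transformers.models.beit`
# is cheap: the torch/flax submodules listed in _import_structure are only
# imported when one of their attributes is first accessed.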
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000) -> bool:
    """Miller-Rabin probabilistic primality test with `prec` random rounds."""
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    # n - 1 = d * (2**exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
if __name__ == "__main__":
    n = abs(int(input('''Enter bound : ''').strip()))
print('''Here\'s the list of primes:''')
print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i)))
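
# Illustrative run (assuming bin_exp_mod implements modular exponentiation):
# entering 20 prints 2, 3, 5, 7, 11, 13, 17, 19.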
import math
def is_prime(number):
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(nth=10001):
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError('''Parameter nth must be int or castable to int.''') from None
    if nth <= 0:
        raise ValueError('''Parameter nth must be greater than or equal to one.''')
    primes = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
            num += 1
        else:
            num += 1
    return primes[len(primes) - 1]
if __name__ == "__main__":
    print(f"""{solution() = }""")
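
# The default call solution() returns 104743, the 10001st prime (the answer to
# Project Euler problem 7).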
'''simple docstring'''
def binomial_coefficient(n, r):
    """Compute C(n, r) with a 1D dynamic-programming table (Pascal's rule)."""
    c = [0 for i in range(r + 1)]
    # nC0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
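# The call above prints 252, i.e. C(10, 5), the number of 5-element subsets of a
# 10-element set.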
'''simple docstring'''
from __future__ import annotations
from random import random
class Node:
    def __init__(self, value=None) -> None:
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None
    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat(
                {f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1)

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right
def split(root, value):
    """Split the treap into nodes with values < value and nodes with values >= value."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right
def merge(left, right):
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right
def insert(root, value):
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)
def erase(root, value):
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)
def inorder(root):
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=''',''')
        inorder(root.right)
def interact_treap(root, args):
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print('''Unknown command''')
    return root
def main():
    root = None
    print(
        '''enter numbers to create a tree, + value to add value into treap, '''
        '''- value to erase all nodes with value. \'q\' to quit. ''')
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print('''goodbye!''')
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
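
# Example session (illustrative): entering "+1 +3 +5 +17 +19 +2 +16 +4 +0" builds
# a treap of nine values; inorder(root) would then print them in sorted order,
# and "-4" would delete every node holding the value 4.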
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class UpperCAmelCase ( unittest.TestCase ):
    def setUp(self):
        vocab = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            '''unk_token''': '''<unk>''',
            '''bos_token''': '''<s>''',
            '''eos_token''': '''</s>''',
        }
        feature_extractor_map = {
            '''feature_size''': 1,
            '''padding_value''': 0.0,
            '''sampling_rate''': 16000,
            '''return_attention_mask''': False,
            '''do_normalize''': True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = '''hf-internal-testing/ngram-beam-search-decoder'''
    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
def lowercase__ ( self : Optional[int] ) -> List[str]:
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = self.get_feature_extractor()
_lowerCAmelCase = self.get_decoder()
_lowerCAmelCase = WavaVecaProcessorWithLM(tokenizer=lowercase_ , feature_extractor=lowercase_ , decoder=lowercase_ )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase_ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , lowercase_ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , lowercase_ )
def lowercase__ ( self : int ) -> Dict:
_lowerCAmelCase = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
_lowerCAmelCase = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def lowercase__ ( self : Dict ) -> Any:
_lowerCAmelCase = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(lowercase_ , """include""" ):
WavaVecaProcessorWithLM(
tokenizer=lowercase_ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def lowercase__ ( self : int ) -> Union[str, Any]:
_lowerCAmelCase = self.get_feature_extractor()
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = self.get_decoder()
_lowerCAmelCase = WavaVecaProcessorWithLM(tokenizer=lowercase_ , feature_extractor=lowercase_ , decoder=lowercase_ )
_lowerCAmelCase = floats_list((3, 10_00) )
_lowerCAmelCase = feature_extractor(lowercase_ , return_tensors="""np""" )
_lowerCAmelCase = processor(lowercase_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowercase__ ( self : Optional[int] ) -> str:
_lowerCAmelCase = self.get_feature_extractor()
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = self.get_decoder()
_lowerCAmelCase = WavaVecaProcessorWithLM(tokenizer=lowercase_ , feature_extractor=lowercase_ , decoder=lowercase_ )
_lowerCAmelCase = '''This is a test string'''
_lowerCAmelCase = processor(text=lowercase_ )
_lowerCAmelCase = tokenizer(lowercase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)
def lowercase__ ( self : Optional[Any] ) -> Dict:
_lowerCAmelCase = self.get_feature_extractor()
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = self.get_decoder()
_lowerCAmelCase = WavaVecaProcessorWithLM(tokenizer=lowercase_ , feature_extractor=lowercase_ , decoder=lowercase_ )
_lowerCAmelCase = self._get_dummy_logits(shape=(10, 16) , seed=13 )
_lowerCAmelCase = processor.decode(lowercase_ )
_lowerCAmelCase = decoder.decode_beams(lowercase_ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
    def test_decoder_batch(self, pool_context):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        logits = self._get_dummy_logits()
        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits)
        else:
            with get_context(pool_context).Pool() as pool:
                decoded_processor = processor.batch_decode(logits, pool)
        logits_list = list(logits)
        with get_context("""fork""").Pool() as p:
            decoded_beams = decoder.decode_beams_batch(logits_list, p)
        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
        self.assertListEqual(texts_decoder, decoded_processor.text)
        self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""], decoded_processor.text)
        self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score)
        self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score)
def lowercase__ ( self : List[str] ) -> List[str]:
_lowerCAmelCase = self.get_feature_extractor()
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = self.get_decoder()
_lowerCAmelCase = WavaVecaProcessorWithLM(tokenizer=lowercase_ , feature_extractor=lowercase_ , decoder=lowercase_ )
_lowerCAmelCase = self._get_dummy_logits()
_lowerCAmelCase = 15
_lowerCAmelCase = -20.0
_lowerCAmelCase = -4.0
_lowerCAmelCase = processor.batch_decode(
lowercase_ , beam_width=lowercase_ , beam_prune_logp=lowercase_ , token_min_logp=lowercase_ , )
_lowerCAmelCase = decoded_processor_out.text
_lowerCAmelCase = list(lowercase_ )
with get_context("""fork""" ).Pool() as pool:
_lowerCAmelCase = decoder.decode_beams_batch(
lowercase_ , lowercase_ , beam_width=lowercase_ , beam_prune_logp=lowercase_ , token_min_logp=lowercase_ , )
_lowerCAmelCase = [d[0][0] for d in decoded_decoder_out]
_lowerCAmelCase = [d[0][2] for d in decoded_decoder_out]
_lowerCAmelCase = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(lowercase_ , lowercase_ )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , lowercase_ )
self.assertTrue(np.array_equal(lowercase_ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.0_54, -18.4_47] , lowercase_ , atol=1E-3 ) )
self.assertTrue(np.array_equal(lowercase_ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.5_54, -13.94_74] , lowercase_ , atol=1E-3 ) )
def lowercase__ ( self : List[Any] ) -> List[str]:
_lowerCAmelCase = self.get_feature_extractor()
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = self.get_decoder()
_lowerCAmelCase = WavaVecaProcessorWithLM(tokenizer=lowercase_ , feature_extractor=lowercase_ , decoder=lowercase_ )
_lowerCAmelCase = self._get_dummy_logits()
_lowerCAmelCase = 2.0
_lowerCAmelCase = 5.0
_lowerCAmelCase = -20.0
_lowerCAmelCase = True
_lowerCAmelCase = processor.batch_decode(
lowercase_ , alpha=lowercase_ , beta=lowercase_ , unk_score_offset=lowercase_ , lm_score_boundary=lowercase_ , )
_lowerCAmelCase = decoded_processor_out.text
_lowerCAmelCase = list(lowercase_ )
decoder.reset_params(
alpha=lowercase_ , beta=lowercase_ , unk_score_offset=lowercase_ , lm_score_boundary=lowercase_ , )
with get_context("""fork""" ).Pool() as pool:
_lowerCAmelCase = decoder.decode_beams_batch(
lowercase_ , lowercase_ , )
_lowerCAmelCase = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(lowercase_ , lowercase_ )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , lowercase_ )
_lowerCAmelCase = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , lowercase_ )
def lowercase__ ( self : Dict ) -> int:
_lowerCAmelCase = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_lowerCAmelCase = processor.decoder.model_container[processor.decoder._model_key]
_lowerCAmelCase = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
_lowerCAmelCase = os.listdir(lowercase_ )
_lowerCAmelCase = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(lowercase_ , lowercase_ )
def lowercase__ ( self : Union[str, Any] ) -> Dict:
_lowerCAmelCase = snapshot_download("""hf-internal-testing/processor_with_lm""" )
_lowerCAmelCase = WavaVecaProcessorWithLM.from_pretrained(lowercase_ )
_lowerCAmelCase = processor.decoder.model_container[processor.decoder._model_key]
_lowerCAmelCase = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
_lowerCAmelCase = os.listdir(lowercase_ )
_lowerCAmelCase = os.listdir(lowercase_ )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(lowercase_ , lowercase_ )
def lowercase__ ( self : str ) -> Any:
_lowerCAmelCase = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_lowerCAmelCase = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_lowerCAmelCase = floats_list((3, 10_00) )
_lowerCAmelCase = processor_wavaveca(lowercase_ , return_tensors="""np""" )
_lowerCAmelCase = processor_auto(lowercase_ , return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
_lowerCAmelCase = self._get_dummy_logits()
_lowerCAmelCase = processor_wavaveca.batch_decode(lowercase_ )
_lowerCAmelCase = processor_auto.batch_decode(lowercase_ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def lowercase__ ( self : Union[str, Any] ) -> List[str]:
_lowerCAmelCase = self.get_feature_extractor()
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = self.get_decoder()
_lowerCAmelCase = WavaVecaProcessorWithLM(tokenizer=lowercase_ , feature_extractor=lowercase_ , decoder=lowercase_ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
@staticmethod
def lowercase__ ( __snake_case : Tuple , __snake_case : str ) -> int:
_lowerCAmelCase = [d[key] for d in offsets]
return retrieved_list
def lowercase__ ( self : str ) -> List[Any]:
_lowerCAmelCase = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_lowerCAmelCase = self._get_dummy_logits()[0]
_lowerCAmelCase = processor.decode(lowercase_ , output_word_offsets=lowercase_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(lowercase_ , lowercase_ ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] )
def lowercase__ ( self : Optional[int] ) -> Any:
_lowerCAmelCase = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_lowerCAmelCase = self._get_dummy_logits()
_lowerCAmelCase = processor.batch_decode(lowercase_ , output_word_offsets=lowercase_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(lowercase_ , lowercase_ ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(lowercase_ , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def lowercase__ ( self : int ) -> Union[str, Any]:
import torch
_lowerCAmelCase = load_dataset("""common_voice""" , """en""" , split="""train""" , streaming=lowercase_ )
_lowerCAmelCase = ds.cast_column("""audio""" , datasets.Audio(sampling_rate=1_60_00 ) )
_lowerCAmelCase = iter(lowercase_ )
_lowerCAmelCase = next(lowercase_ )
_lowerCAmelCase = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
_lowerCAmelCase = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
_lowerCAmelCase = processor(sample["""audio"""]["""array"""] , return_tensors="""pt""" ).input_values
with torch.no_grad():
_lowerCAmelCase = model(lowercase_ ).logits.cpu().numpy()
_lowerCAmelCase = processor.decode(logits[0] , output_word_offsets=lowercase_ )
_lowerCAmelCase = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
_lowerCAmelCase = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
_lowerCAmelCase = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(""" """.join(self.get_from_offsets(lowercase_ , """word""" ) ) , lowercase_ )
self.assertEqual(""" """.join(self.get_from_offsets(lowercase_ , """word""" ) ) , output.text )
# output times
_lowerCAmelCase = torch.tensor(self.get_from_offsets(lowercase_ , """start_time""" ) )
_lowerCAmelCase = torch.tensor(self.get_from_offsets(lowercase_ , """end_time""" ) )
# fmt: off
_lowerCAmelCase = torch.tensor([1.41_99, 1.65_99, 2.25_99, 3.0, 3.24, 3.59_99, 3.79_99, 4.09_99, 4.26, 4.94, 5.28, 5.65_99, 5.78, 5.94, 6.32, 6.53_99, 6.65_99] )
_lowerCAmelCase = torch.tensor([1.53_99, 1.89_99, 2.9, 3.16, 3.53_99, 3.72, 4.01_99, 4.17_99, 4.76, 5.15_99, 5.55_99, 5.69_99, 5.86, 6.19_99, 6.38, 6.61_99, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=0.01 ) )
self.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=0.01 ) )
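
    # Note on the offsets-to-seconds conversion exercised above: for the base
    # wav2vec2 checkpoints the feature encoder downsamples raw audio by a factor
    # of 320, so at a 16 kHz sampling rate time_offset = 320 / 16000 = 0.02, i.e.
    # each CTC frame spans 20 ms of audio.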
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
lowercase__ : int = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def main():
    g = Github(os.environ['''GITHUB_TOKEN'''])
    repo = g.get_repo('''huggingface/diffusers''')
    open_issues = repo.get_issues(state='''open''')

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
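
# The script is meant to run unattended on a schedule (e.g. a daily GitHub
# Actions cron job) with a GITHUB_TOKEN environment variable that has permission
# to edit and label issues.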
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(R'\b(a|an|the)\b', re.UNICODE)
OPTS = None
def parse_args():
    parser = argparse.ArgumentParser('''Official evaluation script for SQuAD version 2.0.''')
    parser.add_argument('''data_file''', metavar='''data.json''', help='''Input data JSON file.''')
    parser.add_argument('''pred_file''', metavar='''pred.json''', help='''Model predictions.''')
    parser.add_argument(
        '''--out-file''', '''-o''', metavar='''eval.json''', help='''Write accuracy metrics to file (default is stdout).''')
    parser.add_argument(
        '''--na-prob-file''', '''-n''', metavar='''na_prob.json''', help='''Model estimates of probability of no answer.''')
    parser.add_argument(
        '''--na-prob-thresh''', '''-t''', type=float, default=1.0,
        help='''Predict \"\" if no-answer probability exceeds this (default = 1.0).''', )
    parser.add_argument(
        '''--out-image-dir''', '''-p''', metavar='''out_images''', default=None, help='''Save precision-recall curves to directory.''')
    parser.add_argument('''--verbose''', '''-v''', action='''store_true''')
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa['''id''']] = bool(qa['''answers''']['''text'''])
    return qid_to_has_ans
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(''' ''', text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa['''id''']
                gold_answers = [t for t in qa['''answers''']['''text'''] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = ['''''']
                if qid not in preds:
                    print(f"""Missing prediction for {qid}""")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ('''exact''', 1_00.0 * sum(exact_scores.values()) / total),
                ('''f1''', 1_00.0 * sum(f1_scores.values()) / total),
                ('''total''', total),
            ])
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ('''exact''', 1_00.0 * sum(exact_scores[k] for k in qid_list) / total),
                ('''f1''', 1_00.0 * sum(f1_scores[k] for k in qid_list) / total),
                ('''total''', total),
            ])
def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"""{prefix}_{k}"""] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color='''b''', alpha=0.2, where='''post''')
    plt.fill_between(recalls, precisions, step='''post''', alpha=0.2, color='''b''')
    plt.xlabel('''Recall''')
    plt.ylabel('''Precision''')
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 1_00.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, '''pr_exact.png'''), title='''Precision-Recall curve for Exact Match score''', )
    pr_f1 = make_precision_recall_eval(
        f1_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, '''pr_f1.png'''), title='''Precision-Recall curve for F1 score''', )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, '''pr_oracle.png'''), title='''Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)''', )
    merge_eval(main_eval, pr_exact, '''pr_exact''')
    merge_eval(main_eval, pr_f1, '''pr_f1''')
    merge_eval(main_eval, pr_oracle, '''pr_oracle''')
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel('''Model probability of no-answer''')
    plt.ylabel('''Proportion of dataset''')
    plt.title(f"""Histogram of no-answer probability: {name}""")
    plt.savefig(os.path.join(image_dir, f"""na_prob_hist_{name}.png"""))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 1_00.0 * best_score / len(scores), best_thresh
def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval['''best_exact'''] = best_exact
    main_eval['''best_exact_thresh'''] = exact_thresh
    main_eval['''best_f1'''] = best_f1
    main_eval['''best_f1_thresh'''] = f1_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json['''data''']
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, '''HasAns''')
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, '''NoAns''')
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, '''hasAns''')
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, '''noAns''')
    if OPTS.out_file:
        with open(OPTS.out_file, '''w''') as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
__A = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
main()
| 362 |
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __A ( self: Union[str, Any] ) -> Union[str, Any]:
_A = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
_A = dict(zip(__A , range(len(__A ) ) ) )
_A = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
_A = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 1_60_00,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
_A = tempfile.mkdtemp()
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_A = os.path.join(self.tmpdirname , __A )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__A ) + '''\n''' )
with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__A ) + '''\n''' )
# load decoder from hub
_A = '''hf-internal-testing/ngram-beam-search-decoder'''
def __A ( self: Tuple , **__A: str ) -> str:
_A = self.add_kwargs_tokens_map.copy()
kwargs.update(__A )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__A )
def __A ( self: Any , **__A: List[Any] ) -> Union[str, Any]:
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__A )
def __A ( self: List[Any] , **__A: Union[str, Any] ) -> int:
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__A )
def __A ( self: List[str] ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
def __A ( self: List[str] ) -> Optional[Any]:
_A = self.get_tokenizer()
_A = self.get_feature_extractor()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
processor.save_pretrained(self.tmpdirname )
_A = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __A )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __A )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , __A )
def __A ( self: Optional[int] ) -> Union[str, Any]:
_A = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
_A = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def __A ( self: str ) -> Any:
_A = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(__A , '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=__A , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def __A ( self: List[str] ) -> str:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
_A = floats_list((3, 10_00) )
_A = feature_extractor(__A , return_tensors='''np''' )
_A = processor(__A , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __A ( self: Union[str, Any] ) -> Optional[Any]:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
_A = '''This is a test string'''
_A = processor(text=__A )
_A = tokenizer(__A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __A ( self: List[str] , __A: Optional[int]=(2, 10, 16) , __A: Optional[int]=77 ) -> List[Any]:
np.random.seed(__A )
return np.random.rand(*__A )
def __A ( self: List[Any] ) -> Optional[Any]:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
_A = self._get_dummy_logits(shape=(10, 16) , seed=13 )
_A = processor.decode(__A )
_A = decoder.decode_beams(__A )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def __A ( self: str , __A: Any ) -> int:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
_A = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
# (a minimal standalone sketch of this fork-ordering constraint follows this test)
if pool_context is None:
_A = processor.batch_decode(__A )
else:
with get_context(__A ).Pool() as pool:
_A = processor.batch_decode(__A , __A )
_A = list(__A )
with get_context('''fork''' ).Pool() as p:
_A = decoder.decode_beams_batch(__A , __A )
_A ,_A ,_A = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(__A , decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text )
self.assertListEqual(__A , decoded_processor.logit_score )
self.assertListEqual(__A , decoded_processor.lm_score )
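# Standalone sketch of the fork-ordering constraint noted in the test above.
# All names here are hypothetical; the "fork" start method is unavailable on
# Windows. With fork, worker processes inherit a snapshot of the parent's
# memory at fork time, so the decoder/LM must already exist when the pool is
# created:
#
#     from multiprocessing import get_context
#     processor = build_processor_with_lm()       # hypothetical factory; must come first
#     with get_context("fork").Pool() as pool:    # the fork inherits the loaded LM
#         transcripts = processor.batch_decode(logits, pool)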
def __A ( self: Optional[Any] ) -> int:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
_A = self._get_dummy_logits()
_A = 15
_A = -20.0
_A = -4.0
_A = processor.batch_decode(
__A , beam_width=__A , beam_prune_logp=__A , token_min_logp=__A , )
_A = decoded_processor_out.text
_A = list(__A )
with get_context('''fork''' ).Pool() as pool:
_A = decoder.decode_beams_batch(
__A , __A , beam_width=__A , beam_prune_logp=__A , token_min_logp=__A , )
_A = [d[0][0] for d in decoded_decoder_out]
_A = [d[0][2] for d in decoded_decoder_out]
_A = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__A , __A )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , __A )
self.assertTrue(np.array_equal(__A , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , __A , atol=1e-3 ) )
self.assertTrue(np.array_equal(__A , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9_474] , __A , atol=1e-3 ) )
def __A ( self: Optional[int] ) -> Dict:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
_A = self._get_dummy_logits()
_A = 2.0
_A = 5.0
_A = -20.0
_A = True
_A = processor.batch_decode(
__A , alpha=__A , beta=__A , unk_score_offset=__A , lm_score_boundary=__A , )
_A = decoded_processor_out.text
_A = list(__A )
decoder.reset_params(
alpha=__A , beta=__A , unk_score_offset=__A , lm_score_boundary=__A , )
with get_context('''fork''' ).Pool() as pool:
_A = decoder.decode_beams_batch(
__A , __A , )
_A = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__A , __A )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , __A )
_A = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , __A )
def __A ( self: int ) -> Optional[Any]:
_A = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_A = processor.decoder.model_container[processor.decoder._model_key]
_A = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
_A = os.listdir(__A )
_A = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__A , __A )
def __A ( self: Tuple ) -> Any:
_A = snapshot_download('''hf-internal-testing/processor_with_lm''' )
_A = WavaVecaProcessorWithLM.from_pretrained(__A )
_A = processor.decoder.model_container[processor.decoder._model_key]
_A = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
_A = os.listdir(__A )
_A = os.listdir(__A )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both the decoder from the hub and the local files in the cache are the same
self.assertListEqual(__A , __A )
def __A ( self: List[str] ) -> Tuple:
_A = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_A = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_A = floats_list((3, 10_00) )
_A = processor_wavaveca(__A , return_tensors='''np''' )
_A = processor_auto(__A , return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
_A = self._get_dummy_logits()
_A = processor_wavaveca.batch_decode(__A )
_A = processor_auto.batch_decode(__A )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def __A ( self: Optional[int] ) -> Any:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
@staticmethod
def __A ( __A: int , __A: List[str] ) -> Union[str, Any]:
_A = [d[key] for d in offsets]
return retrieved_list
def __A ( self: Optional[Any] ) -> int:
_A = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_A = self._get_dummy_logits()[0]
_A = processor.decode(__A , output_word_offsets=__A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(__A , __A ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )
def __A ( self: Optional[Any] ) -> Tuple:
_A = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_A = self._get_dummy_logits()
_A = processor.batch_decode(__A , output_word_offsets=__A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(__A , __A ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(__A , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def __A ( self: Optional[Any] ) -> Optional[Any]:
import torch
_A = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=__A )
_A = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=1_60_00 ) )
_A = iter(__A )
_A = next(__A )
_A = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
_A = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
_A = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values
with torch.no_grad():
_A = model(__A ).logits.cpu().numpy()
_A = processor.decode(logits[0] , output_word_offsets=__A )
_A = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
_A = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
_A = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(__A , '''word''' ) ) , __A )
self.assertEqual(''' '''.join(self.get_from_offsets(__A , '''word''' ) ) , output.text )
# output times
_A = torch.tensor(self.get_from_offsets(__A , '''start_time''' ) )
_A = torch.tensor(self.get_from_offsets(__A , '''end_time''' ) )
# fmt: off
_A = torch.tensor([1.4_199, 1.6_599, 2.2_599, 3.0, 3.24, 3.5_999, 3.7_999, 4.0_999, 4.26, 4.94, 5.28, 5.6_599, 5.78, 5.94, 6.32, 6.5_399, 6.6_599] )
_A = torch.tensor([1.5_399, 1.8_999, 2.9, 3.16, 3.5_399, 3.72, 4.0_199, 4.1_799, 4.76, 5.1_599, 5.5_599, 5.6_999, 5.86, 6.1_999, 6.38, 6.6_199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(__A , __A , atol=0.01 ) )
self.assertTrue(torch.allclose(__A , __A , atol=0.01 ) )
| 75 | 0 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCamelCase : Dict = logging.get_logger()
@dataclass
class snake_case__ :
a_ = 42
a_ = field(default_factory=__a)
a_ = field(default_factory=__a)
def A ( self : Optional[Any] , _A : List[str] , _A : List[Any] , _A : Union[str, Any] ) -> List[str]:
UpperCAmelCase_ : Optional[Any] = len(list(m.modules() ) ) == 1 or isinstance(lowerCAmelCase__ , nn.Convad ) or isinstance(lowerCAmelCase__ , nn.BatchNormad )
if has_not_submodules:
self.traced.append(lowerCAmelCase__ )
def __call__( self : List[str] , _A : Union[str, Any] ) -> Union[str, Any]:
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(lowerCAmelCase__ )
[x.remove() for x in self.handles]
return self
@property
def A ( self : List[str] ) -> int:
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda _A : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
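# Hedged sketch of the hook-based tracing pattern the class above implements:
# register a forward hook on every leaf module, run one forward pass, and
# collect the modules that actually executed (toy model, illustrative only).
import torch
import torch.nn as nn

_toy = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU(), nn.BatchNorm2d(8))
_traced, _handles = [], []

def _record(module, inputs, output):
    _traced.append(module)

for _m in _toy.modules():
    if len(list(_m.children())) == 0:  # leaf modules only
        _handles.append(_m.register_forward_hook(_record))

_toy(torch.randn(1, 3, 16, 16))
for _h in _handles:
    _h.remove()
assert len(_traced) == 3  # Conv2d, ReLU, BatchNorm2d, in call order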
@dataclass
class snake_case__ :
a_ = 42
a_ = 42
a_ = 0
a_ = field(default_factory=__a)
a_ = field(default_factory=__a)
def __call__( self : Optional[Any] , _A : str ) -> str:
UpperCAmelCase_ : Union[str, Any] = Tracker(self.dest )(lowerCAmelCase__ ).parametrized
UpperCAmelCase_ : int = Tracker(self.src )(lowerCAmelCase__ ).parametrized
UpperCAmelCase_ : Tuple = list(filter(lambda _A : type(lowerCAmelCase__ ) not in self.src_skip , lowerCAmelCase__ ) )
UpperCAmelCase_ : List[Any] = list(filter(lambda _A : type(lowerCAmelCase__ ) not in self.dest_skip , lowerCAmelCase__ ) )
if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ):
raise Exception(
F"Numbers of operations are different. Source module has {len(lowerCAmelCase__ )} operations while"
F" destination module has {len(lowerCAmelCase__ )}." )
for dest_m, src_m in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F"Transfered from={src_m} to={dest_m}" )
def __UpperCAmelCase ( A : str , A : Dict , A : str , A : Any = True ) -> str:
print(F"Converting {name}..." )
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = timm.create_model(UpperCamelCase_ , pretrained=UpperCamelCase_ ).eval()
UpperCAmelCase_ : int = ResNetForImageClassification(UpperCamelCase_ ).eval()
UpperCAmelCase_ : int = ModuleTransfer(src=UpperCamelCase_ , dest=UpperCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = torch.randn((1, 3, 2_2_4, 2_2_4) )
module_transfer(UpperCamelCase_ )
assert torch.allclose(from_model(UpperCamelCase_ ) , our_model(UpperCamelCase_ ).logits ), "The model logits don't match the original one."
UpperCAmelCase_ : Tuple = F"resnet{'-'.join(name.split('resnet' ) )}"
print(UpperCamelCase_ )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add model''' , use_temp_dir=UpperCamelCase_ , )
# we can use the convnext one
UpperCAmelCase_ : Optional[int] = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add image processor''' , use_temp_dir=UpperCamelCase_ , )
print(F"Pushed {checkpoint_name}" )
def __UpperCAmelCase ( A : Optional[int] , A : Dict = None , A : Optional[Any] = True ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = '''imagenet-1k-id2label.json'''
UpperCAmelCase_ : Dict = 1_0_0_0
UpperCAmelCase_ : Tuple = (1, num_labels)
UpperCAmelCase_ : Dict = '''huggingface/label-files'''
UpperCAmelCase_ : Any = num_labels
UpperCAmelCase_ : Optional[Any] = json.load(open(hf_hub_download(UpperCamelCase_ , UpperCamelCase_ , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase_ : Optional[Any] = {int(UpperCamelCase_ ): v for k, v in idalabel.items()}
UpperCAmelCase_ : List[Any] = idalabel
UpperCAmelCase_ : List[Any] = {v: k for k, v in idalabel.items()}
UpperCAmelCase_ : Union[str, Any] = partial(UpperCamelCase_ , num_labels=UpperCamelCase_ , idalabel=UpperCamelCase_ , labelaid=UpperCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = {
'''resnet18''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type='''basic''' ),
'''resnet26''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='''bottleneck''' ),
'''resnet34''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type='''basic''' ),
'''resnet50''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='''bottleneck''' ),
'''resnet101''': ImageNetPreTrainedConfig(
depths=[3, 4, 2_3, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='''bottleneck''' ),
'''resnet152''': ImageNetPreTrainedConfig(
depths=[3, 8, 3_6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='''bottleneck''' ),
}
if model_name:
convert_weight_and_push(UpperCamelCase_ , names_to_config[model_name] , UpperCamelCase_ , UpperCamelCase_ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return config, expected_shape
if __name__ == "__main__":
_UpperCamelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
'The name of the model you wish to convert, it must be one of the supported resnet* architectures,'
' currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
_UpperCamelCase : List[Any] = parser.parse_args()
_UpperCamelCase : Any = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 304 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__magic_name__ = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE_ ( __a , unittest.TestCase ):
"""simple docstring"""
__lowercase : Optional[Any] = XLNetTokenizer
__lowercase : List[str] = XLNetTokenizerFast
__lowercase : List[Any] = True
__lowercase : int = True
def snake_case_ ( self):
super().setUp()
# We have a SentencePiece fixture for testing
__SCREAMING_SNAKE_CASE = XLNetTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__)
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = """<s>"""
__SCREAMING_SNAKE_CASE = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__) , lowerCAmelCase__)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__) , lowerCAmelCase__)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , """<unk>""")
self.assertEqual(vocab_keys[1] , """<s>""")
self.assertEqual(vocab_keys[-1] , """<eod>""")
self.assertEqual(len(lowerCAmelCase__) , 1_0_0_6)
def snake_case_ ( self):
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = XLNetTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.tokenize("""This is a test""")
self.assertListEqual(lowerCAmelCase__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""])
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2])
__SCREAMING_SNAKE_CASE = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
__SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4])
__SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = XLNetTokenizer(lowerCAmelCase__ , do_lower_case=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + """""",
"""i""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""▁he""", """ll""", """o"""])
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = XLNetTokenizer(lowerCAmelCase__ , do_lower_case=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
@slow
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = XLNetTokenizer.from_pretrained("""xlnet-base-cased""")
__SCREAMING_SNAKE_CASE = tokenizer.encode("""sequence builders""" , add_special_tokens=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.encode("""multi-sequence build""" , add_special_tokens=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__)
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
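# Layout check for the asserts above: XLNet appends its special tokens at the
# END of the sequence (single: tokens + [SEP, CLS]; pair: a + [SEP] + b +
# [SEP, CLS]); 4 and 3 are the sep/cls ids this test relies on. Sketch:
_SEP, _CLS = 4, 3
def _xlnet_inputs(a, b=None):
    return a + [_SEP] + ((b + [_SEP]) if b is not None else []) + [_CLS]
assert _xlnet_inputs([1, 2]) == [1, 2, _SEP, _CLS]
assert _xlnet_inputs([1], [2]) == [1, _SEP, 2, _SEP, _CLS]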
@slow
def snake_case_ ( self):
# fmt: off
__SCREAMING_SNAKE_CASE = {"""input_ids""": [[1_7, 2_1_4_4_2, 2_7_0, 1_7, 1_0, 1_4_6_4_5, 3_1_8, 3_4, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 7_7_5_2, 2_2_0_1_8, 2_3, 2_1, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 3_3_5_2, 1_4_4_3_1, 1_3, 5_5_0_0, 1_1, 1_1_7_6, 5_8_0, 1_3, 1_6_8_1_9, 4_7_9_7, 2_3, 1_7, 1_0, 1_7_1_3_5, 6_5_8, 1_9, 4_5_7, 7_9_3_2, 1_3, 1_8_4, 1_9, 3_1_5_4, 1_7_1_3_5, 6_4_6_8, 1_9, 1_4_0_4, 1_2_2_6_9, 1_9, 4_2_2_9, 5_3_5_6, 1_6_2_6_4, 4_6, 1_9, 1_7, 2_0_5_4_5, 1_0_3_9_5, 9, 9, 9, 1_1, 2_8, 6_4_2_1, 9_5_3_1, 2_0_7_2_9, 1_7, 1_0, 3_5_3, 1_7_0_2_2, 1_1, 2_1, 6_4_2_1, 9_5_3_1, 1_6_9_4_9, 1_7, 1_0, 1_1_5_0_9, 7_5_3, 1_1, 3_3, 9_5, 2_4_2_1, 7_3_8_5, 9_5_6, 1_4_4_3_1, 2_6_2_6, 2_5, 8_4_2, 7_3_8_5, 4_8_3_6, 2_1, 1_4_2_9, 2_2_7_2, 9_8_5_5, 3_1_2_0, 1_6_1, 2_4_7_3_8, 1_9, 1_3_2_0_3, 6_5_8, 2_1_8, 7_8_7, 2_1, 4_3_0, 1_8_4_8_2, 8_4_7, 2_6_3_7, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2_2, 2_2_1_7_8, 2_7, 1_0_6_4, 2_2, 9_5_6, 1_3, 1_1_1_0_1, 1_4_2_9, 5_8_5_4, 2_4_3_1_3, 1_8_9_5_3, 4_0, 4_2_2, 2_4_3_6_6, 6_8, 1_7_5_8, 3_7, 1_0_4_8_3, 1_4_2_5_7, 3_1, 2_0_7, 2_6_3, 2_1, 2_0_3, 3_7_7_3, 2_5, 7_1, 9_7_3_5, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2, 2_0_4_9, 3_4_4_2, 1_7, 1_3_8_9_4, 3_3_8_0, 2_3, 9_5, 1_8, 1_7_6_3_4, 2_2_8_8, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name="""xlnet-base-cased""" , revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" , )
| 100 | 0 |
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def lowercase ( A_ )-> int:
'''simple docstring'''
a : Tuple = filter(lambda A_ : p.requires_grad , model.parameters() )
a : List[str] = sum([np.prod(p.size() ) for p in model_parameters] )
return params
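# Quick sanity check of the count above on a toy module (hedged, standalone):
# an nn.Linear(10, 5) has 10*5 weights + 5 biases = 55 trainable parameters.
import torch.nn as nn
assert sum(int(np.prod(p.size())) for p in nn.Linear(10, 5).parameters() if p.requires_grad) == 55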
__lowercase = logging.getLogger(__name__)
def lowercase ( A_ , A_ )-> Optional[int]:
'''simple docstring'''
if metric == "rouge2":
a : List[str] = "{val_avg_rouge2:.4f}-{step_count}"
elif metric == "bleu":
a : List[Any] = "{val_avg_bleu:.4f}-{step_count}"
elif metric == "em":
a : List[Any] = "{val_avg_em:.4f}-{step_count}"
else:
raise NotImplementedError(
F'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'''
" function." )
a : Dict = ModelCheckpoint(
dirpath=A_ , filename=A_ , monitor=F'''val_{metric}''' , mode="max" , save_top_k=3 , every_n_epochs=1 , )
return checkpoint_callback
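# The "{val_avg_rouge2:.4f}-{step_count}" string above is a Lightning
# ModelCheckpoint filename template: brace fields are filled from the logged
# metrics when a checkpoint is saved. Formatting-only illustration:
assert "{val_avg_rouge2:.4f}-{step_count}".format(val_avg_rouge2=0.25, step_count=120) == "0.2500-120"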
def lowercase ( A_ , A_ )-> Union[str, Any]:
'''simple docstring'''
return EarlyStopping(
monitor=F'''val_{metric}''' , mode="min" if "loss" in metric else "max" , patience=A_ , verbose=A_ , )
class _A ( pl.Callback ):
"""simple docstring"""
def __snake_case ( self : Optional[int] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : str):
a : Any = {f'''lr_group_{i}''': param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
pl_module.logger.log_metrics(__UpperCAmelCase)
@rank_zero_only
def __snake_case ( self : int , __UpperCAmelCase : pl.Trainer , __UpperCAmelCase : pl.LightningModule , __UpperCAmelCase : str , __UpperCAmelCase : List[str]=True):
logger.info(f'''***** {type_path} results at step {trainer.global_step:05d} *****''')
a : str = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
# Log results
a : str = Path(pl_module.hparams.output_dir)
if type_path == "test":
a : Optional[Any] = od / "test_results.txt"
a : Union[str, Any] = od / "test_generations.txt"
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
a : Tuple = od / f'''{type_path}_results/{trainer.global_step:05d}.txt'''
a : List[Any] = od / f'''{type_path}_generations/{trainer.global_step:05d}.txt'''
results_file.parent.mkdir(exist_ok=__UpperCAmelCase)
generations_file.parent.mkdir(exist_ok=__UpperCAmelCase)
with open(__UpperCAmelCase , "a+") as writer:
for key in sorted(__UpperCAmelCase):
if key in ["log", "progress_bar", "preds"]:
continue
a : Union[str, Any] = metrics[key]
if isinstance(__UpperCAmelCase , torch.Tensor):
a : List[Any] = val.item()
a : Any = f'''{key}: {val:.6f}\n'''
writer.write(__UpperCAmelCase)
if not save_generations:
return
if "preds" in metrics:
a : Dict = "\n".join(metrics["preds"])
generations_file.open("w+").write(__UpperCAmelCase)
@rank_zero_only
def __snake_case ( self : List[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : List[Any]):
try:
a : int = pl_module.model.model.num_parameters()
except AttributeError:
a : int = pl_module.model.num_parameters()
a : Union[str, Any] = count_trainable_parameters(__UpperCAmelCase)
# mp stands for million parameters
trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})
@rank_zero_only
def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : pl.Trainer , __UpperCAmelCase : pl.LightningModule):
save_json(pl_module.metrics , pl_module.metrics_save_path)
return self._write_logs(__UpperCAmelCase , __UpperCAmelCase , "test")
@rank_zero_only
def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : pl.Trainer , __UpperCAmelCase : str):
save_json(pl_module.metrics , pl_module.metrics_save_path)
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 226 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase = {
"""configuration_instructblip""": [
"""INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InstructBlipConfig""",
"""InstructBlipQFormerConfig""",
"""InstructBlipVisionConfig""",
],
"""processing_instructblip""": ["""InstructBlipProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
"""INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InstructBlipQFormerModel""",
"""InstructBlipPreTrainedModel""",
"""InstructBlipForConditionalGeneration""",
"""InstructBlipVisionModel""",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
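# Hedged sketch of the lazy-import pattern above (simplified; not the actual
# transformers._LazyModule): the package module is swapped for a proxy that
# imports the right submodule only when an attribute is first accessed.
import importlib
import types

class _LazySketch(types.ModuleType):
    def __init__(self, name, structure):
        super().__init__(name)
        self._structure = structure
    def __getattr__(self, attr):
        for submodule, names in self._structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(attr)

# usage (inside a package __init__): sys.modules[__name__] = _LazySketch(__name__, _import_structure)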
| 226 | 1 |
"""simple docstring"""
from __future__ import annotations
__SCREAMING_SNAKE_CASE : Dict = tuple[int, int, int]
__SCREAMING_SNAKE_CASE : Optional[int] = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
__SCREAMING_SNAKE_CASE : List[Any] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# -------------------------- default selection --------------------------
# rotors --------------------------
__SCREAMING_SNAKE_CASE : int = 'EGZWVONAHDCLFQMSIPJBYUKXTR'
__SCREAMING_SNAKE_CASE : Any = 'FOBHMDKEXQNRAULPGSJVTYICZW'
__SCREAMING_SNAKE_CASE : str = 'ZJXESIUQLHAVRMDOYGTNFWPBKC'
# reflector --------------------------
__SCREAMING_SNAKE_CASE : int = {
'A': 'N',
'N': 'A',
'B': 'O',
'O': 'B',
'C': 'P',
'P': 'C',
'D': 'Q',
'Q': 'D',
'E': 'R',
'R': 'E',
'F': 'S',
'S': 'F',
'G': 'T',
'T': 'G',
'H': 'U',
'U': 'H',
'I': 'V',
'V': 'I',
'J': 'W',
'W': 'J',
'K': 'X',
'X': 'K',
'L': 'Y',
'Y': 'L',
'M': 'Z',
'Z': 'M',
}
# -------------------------- extra rotors --------------------------
__SCREAMING_SNAKE_CASE : Optional[Any] = 'RMDJXFUWGISLHVTCQNKYPBEZOA'
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'SGLCPQWZHKXAREONTFBVIYJUDM'
__SCREAMING_SNAKE_CASE : Any = 'HVSICLTYKQUBXDWAJZOMFGPREN'
__SCREAMING_SNAKE_CASE : Optional[Any] = 'RZWQHFMVDBKICJLNTUXAGYPSOE'
__SCREAMING_SNAKE_CASE : List[Any] = 'LFKIJODBEGAMQPXVUHYSTCZRWN'
__SCREAMING_SNAKE_CASE : Tuple = 'KOAEGVDHXPQZMLFTYWJNBRCIUS'
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
# Checks if there are 3 unique rotors
if (unique_rotsel := len(set(_SCREAMING_SNAKE_CASE ) )) < 3:
snake_case_ = f"""Please use 3 unique rotors (not {unique_rotsel})"""
raise Exception(_SCREAMING_SNAKE_CASE )
# Checks if rotor positions are valid
snake_case_ , snake_case_ , snake_case_ = rotpos
if not 0 < rotorposa <= len(_SCREAMING_SNAKE_CASE ):
snake_case_ = f"""First rotor position is not within range of 1..26 ({rotorposa}"""
raise ValueError(_SCREAMING_SNAKE_CASE )
if not 0 < rotorposa <= len(_SCREAMING_SNAKE_CASE ):
snake_case_ = f"""Second rotor position is not within range of 1..26 ({rotorposa})"""
raise ValueError(_SCREAMING_SNAKE_CASE )
if not 0 < rotorposa <= len(_SCREAMING_SNAKE_CASE ):
snake_case_ = f"""Third rotor position is not within range of 1..26 ({rotorposa})"""
raise ValueError(_SCREAMING_SNAKE_CASE )
# Validates string and returns dict
snake_case_ = _plugboard(_SCREAMING_SNAKE_CASE )
return rotpos, rotsel, pbdict
def _a ( _SCREAMING_SNAKE_CASE ) -> dict[str, str]:
# validates the plugboard string:
# a) it must be of type string
# b) it must have even length (so pairs can be made)
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
snake_case_ = f"""Plugboard setting isn't type string ({type(_SCREAMING_SNAKE_CASE )})"""
raise TypeError(_SCREAMING_SNAKE_CASE )
elif len(_SCREAMING_SNAKE_CASE ) % 2 != 0:
snake_case_ = f"""Odd number of symbols ({len(_SCREAMING_SNAKE_CASE )})"""
raise Exception(_SCREAMING_SNAKE_CASE )
elif pbstring == "":
return {}
pbstring = pbstring.replace(""" """ , """""" )  # drop spaces so pairs line up
# Checks if all characters are unique
snake_case_ = set()
for i in pbstring:
if i not in abc:
snake_case_ = f"""'{i}' not in list of symbols"""
raise Exception(_SCREAMING_SNAKE_CASE )
elif i in tmppbl:
snake_case_ = f"""Duplicate symbol ({i})"""
raise Exception(_SCREAMING_SNAKE_CASE )
else:
tmppbl.add(_SCREAMING_SNAKE_CASE )
del tmppbl
# Created the dictionary
snake_case_ = {}
for j in range(0 , len(_SCREAMING_SNAKE_CASE ) - 1 , 2 ):
snake_case_ = pbstring[j + 1]
snake_case_ = pbstring[j]
return pb
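# Mini-example of the symmetric mapping built above: the pair string "ABCD"
# (spaces already removed) yields {"A": "B", "B": "A", "C": "D", "D": "C"}.
_pairs = "ABCD"
_pb = {}
for _j in range(0, len(_pairs) - 1, 2):
    _pb[_pairs[_j]], _pb[_pairs[_j + 1]] = _pairs[_j + 1], _pairs[_j]
assert _pb == {"A": "B", "B": "A", "C": "D", "D": "C"}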
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = (rotora, rotora, rotora) , _SCREAMING_SNAKE_CASE = "" , ) -> str:
snake_case_ = text.upper()
snake_case_ , snake_case_ , snake_case_ = _validator(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , plugb.upper() )
snake_case_ , snake_case_ , snake_case_ = rotor_position
snake_case_ , snake_case_ , snake_case_ = rotor_selection
rotorposa -= 1
rotorposa -= 1
rotorposa -= 1
snake_case_ = []
# encryption/decryption process --------------------------
for symbol in text:
if symbol in abc:
# 1st plugboard --------------------------
if symbol in plugboard:
snake_case_ = plugboard[symbol]
# rotor ra --------------------------
snake_case_ = abc.index(_SCREAMING_SNAKE_CASE ) + rotorposa
snake_case_ = rotora[index % len(_SCREAMING_SNAKE_CASE )]
# rotor rb --------------------------
snake_case_ = abc.index(_SCREAMING_SNAKE_CASE ) + rotorposa
snake_case_ = rotora[index % len(_SCREAMING_SNAKE_CASE )]
# rotor rc --------------------------
snake_case_ = abc.index(_SCREAMING_SNAKE_CASE ) + rotorposa
snake_case_ = rotora[index % len(_SCREAMING_SNAKE_CASE )]
# reflector --------------------------
# this is the reason you don't need another machine to decipher
snake_case_ = reflector[symbol]
# 2nd rotors
snake_case_ = abc[rotora.index(_SCREAMING_SNAKE_CASE ) - rotorposa]
snake_case_ = abc[rotora.index(_SCREAMING_SNAKE_CASE ) - rotorposa]
snake_case_ = abc[rotora.index(_SCREAMING_SNAKE_CASE ) - rotorposa]
# 2nd plugboard
if symbol in plugboard:
snake_case_ = plugboard[symbol]
# moves/resets rotor positions
rotorposa += 1
if rotorposa >= len(_SCREAMING_SNAKE_CASE ):
snake_case_ = 0
rotorposa += 1
if rotorposa >= len(_SCREAMING_SNAKE_CASE ):
snake_case_ = 0
rotorposa += 1
if rotorposa >= len(_SCREAMING_SNAKE_CASE ):
snake_case_ = 0
# else:
# pass
# Error could be also raised
# raise ValueError(
# 'Invalid symbol('+repr(symbol)+')')
result.append(_SCREAMING_SNAKE_CASE )
return "".join(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Any = 'This is my Python script that emulates the Enigma machine from WWII.'
__SCREAMING_SNAKE_CASE : Dict = (1, 1, 1)
__SCREAMING_SNAKE_CASE : Dict = 'pictures'
__SCREAMING_SNAKE_CASE : str = (rotora, rotora, rotora)
__SCREAMING_SNAKE_CASE : Any = enigma(message, rotor_pos, rotor_sel, pb)
print('Encrypted message:', en)
print('Decrypted message:', enigma(en, rotor_pos, rotor_sel, pb))
| 347 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Tuple = {
'microsoft/beit-base-patch16-224-pt22k': (
'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class __A (snake_case__):
'''simple docstring'''
__lowercase: Optional[int] = """beit"""
def __init__( self : List[str] , UpperCAmelCase_ : List[Any]=8_192 , UpperCAmelCase_ : Dict=768 , UpperCAmelCase_ : int=12 , UpperCAmelCase_ : Tuple=12 , UpperCAmelCase_ : List[Any]=3_072 , UpperCAmelCase_ : Tuple="gelu" , UpperCAmelCase_ : Dict=0.0 , UpperCAmelCase_ : List[str]=0.0 , UpperCAmelCase_ : Any=0.02 , UpperCAmelCase_ : Optional[Any]=1E-12 , UpperCAmelCase_ : int=224 , UpperCAmelCase_ : Tuple=16 , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : int=False , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : int=False , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : Dict=[3, 5, 7, 11] , UpperCAmelCase_ : Tuple=[1, 2, 3, 6] , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : List[Any]=0.4 , UpperCAmelCase_ : Optional[Any]=256 , UpperCAmelCase_ : Optional[Any]=1 , UpperCAmelCase_ : int=False , UpperCAmelCase_ : Tuple=255 , **UpperCAmelCase_ : List[str] , ) ->Optional[Any]:
"""simple docstring"""
super().__init__(**UpperCAmelCase_ )
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = image_size
snake_case_ = patch_size
snake_case_ = num_channels
snake_case_ = use_mask_token
snake_case_ = use_absolute_position_embeddings
snake_case_ = use_relative_position_bias
snake_case_ = use_shared_relative_position_bias
snake_case_ = layer_scale_init_value
snake_case_ = drop_path_rate
snake_case_ = use_mean_pooling
# decode head attributes (semantic segmentation)
snake_case_ = out_indices
snake_case_ = pool_scales
# auxiliary head attributes (semantic segmentation)
snake_case_ = use_auxiliary_head
snake_case_ = auxiliary_loss_weight
snake_case_ = auxiliary_channels
snake_case_ = auxiliary_num_convs
snake_case_ = auxiliary_concat_input
snake_case_ = semantic_loss_ignore_index
class __A (snake_case__):
'''simple docstring'''
__lowercase: List[Any] = version.parse("""1.11""")
@property
def lowerCAmelCase ( self : Dict ) ->Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase ( self : Any ) ->float:
"""simple docstring"""
return 1E-4
| 347 | 1 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class lowercase_ :
"""simple docstring"""
def __init__( self : Dict ,lowercase__ : int ,lowercase__ : Union[str, Any]=1_3 ,lowercase__ : Tuple=7 ,lowercase__ : Tuple=False ,lowercase__ : Tuple=True ,lowercase__ : Union[str, Any]=False ,lowercase__ : str=False ,lowercase__ : Tuple=1_9 ,lowercase__ : Optional[int]=3_2 ,lowercase__ : List[str]=5 ,lowercase__ : Optional[int]=4 ,lowercase__ : str=3_7 ,lowercase__ : int="gelu" ,lowercase__ : str=0.1 ,lowercase__ : Dict=0.1 ,lowercase__ : Optional[int]=5_1_2 ,lowercase__ : int=1_6 ,lowercase__ : int=2 ,lowercase__ : Dict=0.0_2 ,lowercase__ : str=3 ,lowercase__ : int=4 ,lowercase__ : Any=None ,):
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
__lowercase = ids_tensor([self.batch_size] ,self.num_choices )
__lowercase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = EsmConfig(
vocab_size=3_3 ,hidden_size=self.hidden_size ,pad_token_id=1 ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,is_folding_model=lowercase__ ,esmfold_config={'''trunk''': {'''num_blocks''': 2}, '''fp16_esm''': False} ,)
return config
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : str ,lowercase__ : Optional[Any] ,lowercase__ : Dict ,lowercase__ : Tuple ,lowercase__ : Any ,lowercase__ : Tuple ):
__lowercase = EsmForProteinFolding(config=lowercase__ ).float()
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ )
__lowercase = model(lowercase__ )
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.positions.shape ,(8, self.batch_size, self.seq_length, 1_4, 3) )
self.parent.assertEqual(result.angles.shape ,(8, self.batch_size, self.seq_length, 7, 2) )
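# Shape note on the asserts above (a hedged reading of the expected shapes):
# positions appear to come out as (num_structure_iterations=8, batch, seq_len,
# atoms_per_residue=14, xyz=3) in the atom14 representation, and angles as
# (8, batch, seq_len, 7 torsion angles, sin/cos). Shape-only sketch:
import torch
_positions = torch.zeros(8, 13, 7, 14, 3)
assert _positions.shape[-2:] == (14, 3)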
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = self.prepare_config_and_inputs()
(__lowercase, __lowercase, __lowercase, __lowercase, __lowercase, __lowercase) = config_and_inputs
__lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowercase_ (lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = False
SCREAMING_SNAKE_CASE : Tuple = (EsmForProteinFolding,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE : str = ()
SCREAMING_SNAKE_CASE : Tuple = {} if is_torch_available() else {}
SCREAMING_SNAKE_CASE : Union[str, Any] = False
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = EsmFoldModelTester(self )
__lowercase = ConfigTester(self ,config_class=lowercase__ ,hidden_size=3_7 )
def SCREAMING_SNAKE_CASE ( self : int ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__ )
@unittest.skip('''Does not support attention outputs''' )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
pass
@unittest.skip
def SCREAMING_SNAKE_CASE ( self : Tuple ):
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
pass
@unittest.skip('''ESMFold does not support passing input embeds!''' )
def SCREAMING_SNAKE_CASE ( self : Any ):
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def SCREAMING_SNAKE_CASE ( self : Dict ):
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def SCREAMING_SNAKE_CASE ( self : Any ):
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def SCREAMING_SNAKE_CASE ( self : str ):
pass
@unittest.skip('''ESMFold does not output hidden states in the normal way.''' )
def SCREAMING_SNAKE_CASE ( self : str ):
pass
@unittest.skip('''ESMfold does not output hidden states in the normal way.''' )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
pass
@unittest.skip('''ESMFold only has one output format.''' )
def SCREAMING_SNAKE_CASE ( self : Any ):
pass
@unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''' )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
pass
@unittest.skip('''ESMFold does not support input chunking.''' )
def SCREAMING_SNAKE_CASE ( self : Any ):
pass
@unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''' )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def SCREAMING_SNAKE_CASE ( self : Dict ):
pass
@unittest.skip('''ESMFold doesn\'t support data parallel.''' )
def SCREAMING_SNAKE_CASE ( self : Any ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def SCREAMING_SNAKE_CASE ( self : Dict ):
pass
@require_torch
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = EsmForProteinFolding.from_pretrained('''facebook/esmfold_v1''' ).float()
model.eval()
__lowercase = torch.tensor([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] )
__lowercase = model(lowercase__ )['''positions''']
__lowercase = torch.tensor([2.5_8_2_8, 0.7_9_9_3, -1_0.9_3_3_4] ,dtype=torch.floataa )
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] ,lowercase__ ,atol=1e-4 ) )
| 52 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
'''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = 'luke'
def __init__( self : int ,lowercase__ : Tuple=5_0_2_6_7 ,lowercase__ : str=5_0_0_0_0_0 ,lowercase__ : Union[str, Any]=7_6_8 ,lowercase__ : Any=2_5_6 ,lowercase__ : int=1_2 ,lowercase__ : Dict=1_2 ,lowercase__ : List[Any]=3_0_7_2 ,lowercase__ : Dict="gelu" ,lowercase__ : List[Any]=0.1 ,lowercase__ : Union[str, Any]=0.1 ,lowercase__ : List[Any]=5_1_2 ,lowercase__ : Tuple=2 ,lowercase__ : Any=0.0_2 ,lowercase__ : Tuple=1e-1_2 ,lowercase__ : Optional[int]=True ,lowercase__ : Optional[int]=None ,lowercase__ : Tuple=1 ,lowercase__ : int=0 ,lowercase__ : Tuple=2 ,**lowercase__ : Dict ,):
super().__init__(pad_token_id=lowercase__ ,bos_token_id=lowercase__ ,eos_token_id=lowercase__ ,**lowercase__ )
__lowercase = vocab_size
__lowercase = entity_vocab_size
__lowercase = hidden_size
__lowercase = entity_emb_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = hidden_act
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = use_entity_aware_attention
__lowercase = classifier_dropout
| 52 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
lowerCAmelCase: str = None
lowerCAmelCase: Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase: List[Any] = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase: List[Any] = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase: List[Any] = {
"""facebook/nllb-large-en-ro""": 1_0_2_4,
"""facebook/nllb-200-distilled-600M""": 1_0_2_4,
}
# fmt: off
lowerCAmelCase: Tuple = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class a__( __UpperCAmelCase ):
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = ['''input_ids''', '''attention_mask''']
lowercase__ = NllbTokenizer
lowercase__ = []
lowercase__ = []
def __init__( self : int , __snake_case : str=None , __snake_case : Dict=None , __snake_case : Dict="<s>" , __snake_case : int="</s>" , __snake_case : Any="</s>" , __snake_case : int="<s>" , __snake_case : Dict="<unk>" , __snake_case : Tuple="<pad>" , __snake_case : Optional[Any]="<mask>" , __snake_case : Optional[Any]=None , __snake_case : List[str]=None , __snake_case : Union[str, Any]=None , __snake_case : Dict=False , **__snake_case : Any , ):
a : Optional[Any] = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else mask_token
a : str = legacy_behaviour
super().__init__(
vocab_file=__snake_case , tokenizer_file=__snake_case , bos_token=__snake_case , eos_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , unk_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , src_lang=__snake_case , tgt_lang=__snake_case , additional_special_tokens=__snake_case , legacy_behaviour=__snake_case , **__snake_case , )
a : Optional[int] = vocab_file
a : str = False if not self.vocab_file else True
a : List[Any] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
a : Any = {
lang_code: self.convert_tokens_to_ids(__snake_case ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
a : str = src_lang if src_lang is not None else 'eng_Latn'
a : int = self.convert_tokens_to_ids(self._src_lang )
a : Any = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def lowercase_ ( self : List[str] ):
return self._src_lang
@src_lang.setter
def lowercase_ ( self : Tuple , __snake_case : Optional[Any] ):
a : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def lowercase_ ( self : Union[str, Any] , __snake_case : Dict , __snake_case : Any = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowercase_ ( self : str , __snake_case : Dict , __snake_case : str = None ):
a : Optional[int] = [self.sep_token_id]
a : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase_ ( self : List[str] , __snake_case : List[Any] , __snake_case : int , __snake_case : int , __snake_case : Union[str, Any] , **__snake_case : List[Any] ):
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
a : List[str] = src_lang
a : Any = self(__snake_case , add_special_tokens=__snake_case , return_tensors=__snake_case , **__snake_case )
a : Optional[int] = self.convert_tokens_to_ids(__snake_case )
a : List[str] = tgt_lang_id
return inputs
def lowercase_ ( self : Tuple , __snake_case : Tuple , __snake_case : List[str] = "eng_Latn" , __snake_case : Optional[int] = None , __snake_case : List[Any] = "fra_Latn" , **__snake_case : List[Any] , ):
a : Any = src_lang
a : str = tgt_lang
return super().prepare_seqaseq_batch(__snake_case , __snake_case , **__snake_case )
def lowercase_ ( self : Union[str, Any] ):
return self.set_src_lang_special_tokens(self.src_lang )
def lowercase_ ( self : Dict ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowercase_ ( self : Optional[int] , __snake_case : Optional[int] ):
a : List[str] = self.convert_tokens_to_ids(__snake_case )
if self.legacy_behaviour:
a : Optional[Any] = []
a : Optional[Any] = [self.eos_token_id, self.cur_lang_code]
else:
a : int = [self.cur_lang_code]
a : Union[str, Any] = [self.eos_token_id]
a : List[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
a : Optional[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
a : Optional[int] = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def lowercase_ ( self : List[str] , __snake_case : List[Any] ):
a : Optional[Any] = self.convert_tokens_to_ids(__snake_case )
if self.legacy_behaviour:
a : int = []
a : Union[str, Any] = [self.eos_token_id, self.cur_lang_code]
else:
a : Dict = [self.cur_lang_code]
a : List[Any] = [self.eos_token_id]
a : Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens )
a : int = self.convert_ids_to_tokens(self.suffix_tokens )
a : int = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def lowercase_ ( self : Optional[int] , __snake_case : List[Any] , __snake_case : List[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(__snake_case ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory.""" )
return
a : Any = os.path.join(
__snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ):
copyfile(self.vocab_file , __snake_case )
return (out_vocab_file,) | 297 |
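# A minimal sketch (not part of the class above) of how build_inputs_with_special_tokens
# lays out a source sequence once set_src_lang_special_tokens has run: with
# legacy_behaviour=False the language code leads and </s> trails; in legacy mode both
# land in the suffix, eos first. The token ids below are made up for illustration.
def compose(token_ids, lang_code_id, eos_id, legacy_behaviour=False):
    prefix, suffix = ([], [eos_id, lang_code_id]) if legacy_behaviour else ([lang_code_id], [eos_id])
    return prefix + token_ids + suffix

assert compose([10, 11], lang_code_id=7, eos_id=2) == [7, 10, 11, 2]
assert compose([10, 11], lang_code_id=7, eos_id=2, legacy_behaviour=True) == [10, 11, 2, 7]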
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A__ :
"""simple docstring"""
def __init__( self , lowercase , lowercase=13 , lowercase=30 , lowercase=2 , lowercase=3 , lowercase=True , lowercase=True , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=10 , lowercase=0.02 , lowercase=None , ) -> Union[str, Any]:
'''simple docstring'''
a__ : Optional[int] = parent
a__ : List[str] = batch_size
a__ : List[str] = image_size
a__ : Dict = patch_size
a__ : Optional[Any] = num_channels
a__ : List[Any] = is_training
a__ : str = use_labels
a__ : Dict = hidden_size
a__ : Tuple = num_hidden_layers
a__ : Tuple = num_attention_heads
a__ : Union[str, Any] = intermediate_size
a__ : List[str] = hidden_act
a__ : List[str] = hidden_dropout_prob
a__ : Any = attention_probs_dropout_prob
a__ : Dict = type_sequence_label_size
a__ : Tuple = initializer_range
a__ : Optional[int] = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
a__ : List[str] = (image_size // patch_size) ** 2
a__ : Any = num_patches + 1
def __lowercase ( self) -> int:
'''simple docstring'''
a__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a__ : Tuple = None
if self.use_labels:
a__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a__ : List[str] = self.get_config()
return config, pixel_values, labels
def __lowercase ( self) -> Optional[int]:
'''simple docstring'''
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def __lowercase ( self , lowercase , lowercase , lowercase) -> List[str]:
'''simple docstring'''
a__ : int = ViTMSNModel(config=lowercase)
model.to(lowercase)
model.eval()
a__ : int = model(lowercase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def __lowercase ( self , lowercase , lowercase , lowercase) -> Tuple:
'''simple docstring'''
a__ : Optional[Any] = self.type_sequence_label_size
a__ : List[str] = ViTMSNForImageClassification(lowercase)
model.to(lowercase)
model.eval()
a__ : int = model(lowercase , labels=lowercase)
        print(f'Pixel and labels shape: {pixel_values.shape}, {labels.shape}')
        print(f'Labels: {labels}')
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
a__ : List[str] = 1
a__ : Optional[int] = ViTMSNForImageClassification(lowercase)
model.to(lowercase)
model.eval()
a__ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
a__ : Dict = model(lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def __lowercase ( self) -> Tuple:
'''simple docstring'''
a__ : List[Any] = self.prepare_config_and_inputs()
a__ , a__ , a__ : Optional[Any] = config_and_inputs
a__ : Dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class A__ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
__A : Any = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
__A : Tuple = (
{'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
__A : List[str] = False
__A : Optional[Any] = False
__A : Union[str, Any] = False
__A : Any = False
def __lowercase ( self) -> List[str]:
'''simple docstring'''
a__ : Optional[int] = ViTMSNModelTester(self)
a__ : Union[str, Any] = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase , hidden_size=37)
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMSN does not use inputs_embeds')
def __lowercase ( self) -> Any:
'''simple docstring'''
pass
def __lowercase ( self) -> Tuple:
'''simple docstring'''
a__ , a__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : Union[str, Any] = model_class(lowercase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
a__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase , nn.Linear))
def __lowercase ( self) -> Tuple:
'''simple docstring'''
a__ , a__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : Dict = model_class(lowercase)
a__ : List[Any] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ : str = [*signature.parameters.keys()]
a__ : Optional[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase)
def __lowercase ( self) -> str:
'''simple docstring'''
a__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase)
def __lowercase ( self) -> str:
'''simple docstring'''
a__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase)
@slow
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ : Tuple = ViTMSNModel.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
def A_ ( ) -> Dict:
a__ : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained('facebook/vit-msn-small') if is_vision_available() else None
@slow
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
torch.manual_seed(2)
a__ : List[str] = ViTMSNForImageClassification.from_pretrained('facebook/vit-msn-small').to(lowercase)
a__ : Any = self.default_image_processor
a__ : List[Any] = prepare_img()
a__ : Optional[Any] = image_processor(images=lowercase , return_tensors='pt').to(lowercase)
# forward pass
with torch.no_grad():
a__ : Tuple = model(**lowercase)
# verify the logits
a__ : Union[str, Any] = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape , lowercase)
a__ : Any = torch.tensor([-0.08_03, -0.44_54, -0.23_75]).to(lowercase)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase , atol=1e-4))
| 99 | 0 |
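# Quick arithmetic check on the sequence length the tester above derives: a ViT-style
# encoder produces one token per non-overlapping patch plus one [CLS] token.
image_size, patch_size = 30, 2          # the tester's defaults
num_patches = (image_size // patch_size) ** 2
assert num_patches + 1 == 226           # the seq_length checked in create_and_check_model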
from collections.abc import Iterable
from typing import Any
class lowercase_ :
'''simple docstring'''
def __init__( self : List[Any] , __UpperCAmelCase : int | None = None ) ->Optional[Any]:
"""simple docstring"""
a = value
a = None # Added in order to delete a node easier
a = None
a = None
def __repr__( self : int ) ->str:
"""simple docstring"""
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({F"""{self.value}""": (self.left, self.right)} , indent=1 )
class lowercase_ :
'''simple docstring'''
def __init__( self : Optional[Any] , __UpperCAmelCase : Node | None = None ) ->str:
"""simple docstring"""
a = root
def __str__( self : Dict ) ->str:
"""simple docstring"""
return str(self.root )
def __lowerCAmelCase ( self : str , __UpperCAmelCase : Node , __UpperCAmelCase : Node | None ) ->None:
"""simple docstring"""
if new_children is not None: # reset its kids
a = node.parent
if node.parent is not None: # reset its parent
if self.is_right(__UpperCAmelCase ): # If it is the right children
a = new_children
else:
a = new_children
else:
a = new_children
def __lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Node ) ->bool:
"""simple docstring"""
if node.parent and node.parent.right:
return node == node.parent.right
return False
def __lowerCAmelCase ( self : Optional[int] ) ->bool:
"""simple docstring"""
return self.root is None
def __lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : Dict ) ->None:
"""simple docstring"""
a = Node(__UpperCAmelCase ) # create a new Node
if self.empty(): # if Tree is empty
a = new_node # set its root
else: # Tree is not empty
a = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
a = new_node # We insert the new node in a leaf
break
else:
a = parent_node.left
else:
if parent_node.right is None:
a = new_node
break
else:
a = parent_node.right
a = parent_node
def __lowerCAmelCase ( self : List[Any] , *__UpperCAmelCase : str ) ->None:
"""simple docstring"""
for value in values:
self.__insert(__UpperCAmelCase )
def __lowerCAmelCase ( self : int , __UpperCAmelCase : List[str] ) ->Node | None:
"""simple docstring"""
if self.empty():
            raise IndexError('''Warning: Tree is empty! Please insert values before searching.''' )
else:
a = self.root
            # short-circuit evaluation here avoids an AttributeError when node is None
            while node is not None and node.value != value:
a = node.left if value < node.value else node.right
return node
def __lowerCAmelCase ( self : Any , __UpperCAmelCase : Node | None = None ) ->Node | None:
"""simple docstring"""
if node is None:
if self.root is None:
return None
a = self.root
if not self.empty():
while node.right is not None:
a = node.right
return node
def __lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : Node | None = None ) ->Node | None:
"""simple docstring"""
if node is None:
a = self.root
if self.root is None:
return None
if not self.empty():
a = self.root
while node.left is not None:
a = node.left
return node
def __lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : int ) ->None:
"""simple docstring"""
a = self.search(__UpperCAmelCase ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
                self.__reassign_nodes(__UpperCAmelCase , None )
elif node.left is None: # Has only right children
self.__reassign_nodes(__UpperCAmelCase , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(__UpperCAmelCase , node.left )
else:
a = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
a = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def __lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Node | None ) ->Iterable:
"""simple docstring"""
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def __lowerCAmelCase ( self : Any , __UpperCAmelCase : str=None ) ->Any:
"""simple docstring"""
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def __lowerCAmelCase ( self : str , __UpperCAmelCase : list , __UpperCAmelCase : Node | None ) ->None:
"""simple docstring"""
if node:
self.inorder(__UpperCAmelCase , node.left )
arr.append(node.value )
self.inorder(__UpperCAmelCase , node.right )
def __lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : int , __UpperCAmelCase : Node ) ->int:
"""simple docstring"""
a = []
self.inorder(__UpperCAmelCase , __UpperCAmelCase ) # append all values to list using inorder traversal
return arr[k - 1]
def _a ( a :Node | None ) -> list[Node]:
a = []
if curr_node is not None:
a = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def _a ( ) -> None:
a = (8, 3, 6, 1, 10, 14, 13, 4, 7)
a = BinarySearchTree()
for i in testlist:
t.insert(a )
# Prints all the elements of the list in order traversal
print(a )
if t.search(6 ) is not None:
print('''The value 6 exists''' )
else:
print('''The value 6 doesn\'t exist''' )
if t.search(-1 ) is not None:
print('''The value -1 exists''' )
else:
print('''The value -1 doesn\'t exist''' )
if not t.empty():
print('''Max Value: ''' , t.get_max().value ) # type: ignore
print('''Min Value: ''' , t.get_min().value ) # type: ignore
for i in testlist:
t.remove(a )
print(a )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 26 |
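# De-obfuscated sketch of the insert/inorder pair above (standalone; it does not reuse
# the class): smaller keys go left, others right, so an inorder walk returns the keys
# sorted, which is the property the kth-smallest helper above depends on.
class _Node:
    def __init__(self, value):
        self.value, self.left, self.right = value, None, None

def _insert(root, value):
    if root is None:
        return _Node(value)
    if value < root.value:
        root.left = _insert(root.left, value)
    else:
        root.right = _insert(root.right, value)
    return root

def _inorder(node):
    return _inorder(node.left) + [node.value] + _inorder(node.right) if node else []

_root = None
for _v in (8, 3, 6, 1, 10, 14, 13, 4, 7):
    _root = _insert(_root, _v)
assert _inorder(_root) == sorted((8, 3, 6, 1, 10, 14, 13, 4, 7))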
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : Optional[int] ) ->Tuple:
"""simple docstring"""
a = tempfile.mkdtemp()
a = BlipImageProcessor()
a = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
a = BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
a = InstructBlipProcessor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self : Optional[Any] , **__UpperCAmelCase : Tuple ) ->List[str]:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).tokenizer
def __lowerCAmelCase ( self : int , **__UpperCAmelCase : str ) ->List[str]:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).image_processor
def __lowerCAmelCase ( self : Optional[Any] , **__UpperCAmelCase : Any ) ->Optional[Any]:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).qformer_tokenizer
def __lowerCAmelCase ( self : str ) ->Tuple:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self : Optional[int] ) ->str:
"""simple docstring"""
a = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
a = [Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowerCAmelCase ( self : Optional[Any] ) ->List[str]:
"""simple docstring"""
a = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
a = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
a = self.get_image_processor(do_normalize=__UpperCAmelCase , padding_value=1.0 )
a = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCAmelCase )
self.assertIsInstance(processor.qformer_tokenizer , __UpperCAmelCase )
def __lowerCAmelCase ( self : Optional[Any] ) ->Any:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = self.get_qformer_tokenizer()
a = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
a = self.prepare_image_inputs()
a = image_processor(__UpperCAmelCase , return_tensors='''np''' )
a = processor(images=__UpperCAmelCase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowerCAmelCase ( self : List[str] ) ->List[Any]:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = self.get_qformer_tokenizer()
a = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
a = '''lower newer'''
a = processor(text=__UpperCAmelCase )
a = tokenizer(__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase )
a = qformer_tokenizer(__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['''qformer_''' + key] )
def __lowerCAmelCase ( self : Dict ) ->Optional[Any]:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = self.get_qformer_tokenizer()
a = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
a = '''lower newer'''
a = self.prepare_image_inputs()
a = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(
list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def __lowerCAmelCase ( self : Dict ) ->List[Any]:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = self.get_qformer_tokenizer()
a = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a = processor.batch_decode(__UpperCAmelCase )
a = tokenizer.batch_decode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def __lowerCAmelCase ( self : Union[str, Any] ) ->str:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = self.get_qformer_tokenizer()
a = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
a = '''lower newer'''
a = self.prepare_image_inputs()
a = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(
list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
| 26 | 1 |
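# Sketch of the key layout the tests above assert on: the main tokenizer's outputs keep
# their names, the Q-Former tokenizer's outputs are re-keyed with a "qformer_" prefix,
# and pixel values are attached last. The values are placeholders, not real tensors.
text_enc = {"input_ids": [[1, 2]], "attention_mask": [[1, 1]]}
qformer_enc = {"input_ids": [[3, 4]], "attention_mask": [[1, 1]]}
merged = dict(text_enc)
merged.update({f"qformer_{key}": value for key, value in qformer_enc.items()})
merged["pixel_values"] = None  # the image processor's output would go here
assert list(merged) == [
    "input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"
]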
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'''vocab_file''': '''sentencepiece.bpe.model'''}
lowerCAmelCase__ = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
}
lowerCAmelCase__ = {
'''moussaKam/mbarthez''': 1024,
'''moussaKam/barthez''': 1024,
'''moussaKam/barthez-orangesum-title''': 1024,
}
lowerCAmelCase__ = '''▁'''
class __snake_case ( _lowercase):
snake_case__ : int = VOCAB_FILES_NAMES
snake_case__ : Any = PRETRAINED_VOCAB_FILES_MAP
snake_case__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ : Union[str, Any] = ["input_ids", "attention_mask"]
def __init__( self : Union[str, Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any]="<s>" , __lowerCAmelCase : Dict="</s>" , __lowerCAmelCase : Optional[int]="</s>" , __lowerCAmelCase : Optional[int]="<s>" , __lowerCAmelCase : Optional[int]="<unk>" , __lowerCAmelCase : str="<pad>" , __lowerCAmelCase : Optional[Any]="<mask>" , __lowerCAmelCase : Optional[Dict[str, Any]] = None , **__lowerCAmelCase : Optional[int] , ):
"""simple docstring"""
_lowerCamelCase : Dict = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else mask_token
_lowerCamelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , )
_lowerCamelCase : Dict = vocab_file
_lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__lowerCAmelCase ) )
_lowerCamelCase : Union[str, Any] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
_lowerCamelCase : int = len(self.sp_model ) - 1
_lowerCamelCase : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCamelCase : List[str] = [self.cls_token_id]
_lowerCamelCase : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None , __lowerCAmelCase : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCAmelCase )) + [1]
return [1] + ([0] * len(__lowerCAmelCase )) + [1, 1] + ([0] * len(__lowerCAmelCase )) + [1]
def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ):
"""simple docstring"""
_lowerCamelCase : List[Any] = [self.sep_token_id]
_lowerCamelCase : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
return len(self.sp_model )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : int = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : str ):
"""simple docstring"""
return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : List[str] ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_lowerCamelCase : List[Any] = self.sp_model.PieceToId(__lowerCAmelCase )
return spm_id if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : str ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : Tuple = []
_lowerCamelCase : Tuple = ''''''
_lowerCamelCase : Dict = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__lowerCAmelCase ) + token
_lowerCamelCase : Union[str, Any] = True
_lowerCamelCase : List[Any] = []
else:
current_sub_tokens.append(__lowerCAmelCase )
_lowerCamelCase : List[str] = False
out_string += self.sp_model.decode(__lowerCAmelCase )
return out_string.strip()
def __getstate__( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Dict = self.__dict__.copy()
_lowerCamelCase : int = None
return state
def __setstate__( self : int , __lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_lowerCamelCase : Dict = {}
_lowerCamelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(__lowerCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_lowerCamelCase : Any = os.path.join(
__lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCAmelCase , '''wb''' ) as fi:
_lowerCamelCase : Tuple = self.sp_model.serialized_model_proto()
fi.write(__lowerCAmelCase )
return (out_vocab_file,)
| 72 |
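# Sketch of the lookup order in _convert_token_to_id above: the fairseq-style special
# tokens win, then the sentencepiece model is consulted, and a piece id of 0
# (sentencepiece's own <unk>) falls back to the tokenizer's unk id. Here piece_to_id is
# a stand-in for sp_model.PieceToId.
def token_to_id(token, fairseq_tokens_to_ids, piece_to_id, unk_token_id=3):
    if token in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[token]
    spm_id = piece_to_id(token)
    return spm_id if spm_id else unk_token_id

specials = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
assert token_to_id("<pad>", specials, lambda token: 0) == 1     # special token wins
assert token_to_id("▁hello", specials, lambda token: 42) == 42  # regular piece
assert token_to_id("▁zzzz", specials, lambda token: 0) == 3     # unknown piece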
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class __snake_case :
def __init__( self : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any=2 , __lowerCAmelCase : Any=True , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Optional[Any]=1_0 , __lowerCAmelCase : List[str]=3 , __lowerCAmelCase : int=3_2 * 4 , __lowerCAmelCase : Dict=3_2 * 6 , __lowerCAmelCase : Any=4 , __lowerCAmelCase : List[str]=3_2 , ):
"""simple docstring"""
_lowerCamelCase : List[str] = parent
_lowerCamelCase : str = batch_size
_lowerCamelCase : Dict = is_training
_lowerCamelCase : str = use_auxiliary_loss
_lowerCamelCase : Any = num_queries
_lowerCamelCase : List[Any] = num_channels
_lowerCamelCase : int = min_size
_lowerCamelCase : Any = max_size
_lowerCamelCase : int = num_labels
_lowerCamelCase : List[str] = mask_feature_size
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__lowerCAmelCase )
_lowerCamelCase : List[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowerCAmelCase ) > 0.5
).float()
_lowerCamelCase : Optional[int] = (torch.rand((self.batch_size, self.num_labels) , device=__lowerCAmelCase ) > 0.5).long()
_lowerCamelCase : Dict = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = self.prepare_config_and_inputs()
_lowerCamelCase : List[str] = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : List[str] = output.encoder_hidden_states
_lowerCamelCase : Tuple = output.pixel_decoder_hidden_states
_lowerCamelCase : Dict = output.transformer_decoder_hidden_states
        self.parent.assertEqual(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) )
        self.parent.assertEqual(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) )
        self.parent.assertEqual(len(__lowerCAmelCase ) , config.decoder_config.decoder_layers )
def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any]=False ):
"""simple docstring"""
with torch.no_grad():
_lowerCamelCase : Optional[int] = MaskFormerModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Tuple = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase )
_lowerCamelCase : List[str] = model(__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
        # encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : str = MaskFormerForInstanceSegmentation(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
def comm_check_on_output(__lowerCAmelCase : Dict ):
            # still check that all the required outputs are present
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_lowerCamelCase : str = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = model(__lowerCAmelCase )
comm_check_on_output(__lowerCAmelCase )
_lowerCamelCase : List[str] = model(
pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase )
comm_check_on_output(__lowerCAmelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __snake_case ( _lowercase , _lowercase , unittest.TestCase):
snake_case__ : Dict = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
snake_case__ : Any = (
{"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
snake_case__ : List[str] = False
snake_case__ : List[str] = False
snake_case__ : Optional[int] = False
snake_case__ : Dict = False
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = MaskFormerModelTester(self )
_lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowerCAmelCase )
@unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
pass
@unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
pass
@unittest.skip(reason='''MaskFormer is not a generative model''' )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
pass
@unittest.skip(reason='''MaskFormer does not use token embeddings''' )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : str = model_class(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : Dict = [*signature.parameters.keys()]
_lowerCamelCase : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
@slow
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
for model_name in ["facebook/maskformer-swin-small-coco"]:
_lowerCamelCase : Union[str, Any] = MaskFormerModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : List[Any] = (self.model_tester.min_size,) * 2
_lowerCamelCase : Union[str, Any] = {
'''pixel_values''': torch.randn((2, 3, *size) , device=__lowerCAmelCase ),
'''mask_labels''': torch.randn((2, 1_0, *size) , device=__lowerCAmelCase ),
'''class_labels''': torch.zeros(2 , 1_0 , device=__lowerCAmelCase ).long(),
}
_lowerCamelCase : int = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = model(**__lowerCAmelCase )
self.assertTrue(outputs.loss is not None )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : List[Any] = model_class(__lowerCAmelCase ).to(__lowerCAmelCase )
_lowerCamelCase : List[str] = model(**__lowerCAmelCase , output_attentions=__lowerCAmelCase )
self.assertTrue(outputs.attentions is not None )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
_lowerCamelCase : Union[str, Any] = self.all_model_classes[1]
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
_lowerCamelCase : Any = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.train()
_lowerCamelCase : List[Any] = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ).loss
loss.backward()
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : int = self.all_model_classes[1]
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
_lowerCamelCase : List[str] = True
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : Optional[Any] = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.train()
_lowerCamelCase : Optional[Any] = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase )
_lowerCamelCase : List[Any] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_lowerCamelCase : Tuple = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
_lowerCamelCase : List[str] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_lowerCamelCase : Optional[int] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__lowerCAmelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowerCAmelCase__ = 1E-4
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class __snake_case ( unittest.TestCase):
@cached_property
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
return (
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
if is_vision_available()
else None
)
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : str = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__lowerCAmelCase )
_lowerCamelCase : Any = self.default_image_processor
_lowerCamelCase : List[Any] = prepare_img()
_lowerCamelCase : Any = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase )
_lowerCamelCase : Any = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
_lowerCamelCase : int = model(**__lowerCAmelCase )
_lowerCamelCase : str = torch.tensor(
[[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(__lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
_lowerCamelCase : Union[str, Any] = torch.tensor(
[[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(__lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
_lowerCamelCase : Optional[int] = torch.tensor(
[[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(__lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(__lowerCAmelCase )
.eval()
)
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : str = prepare_img()
_lowerCamelCase : int = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase )
_lowerCamelCase : str = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
_lowerCamelCase : Optional[int] = model(**__lowerCAmelCase )
# masks_queries_logits
_lowerCamelCase : Optional[int] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_lowerCamelCase : List[str] = [
[-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33],
[-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95],
[-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42],
]
_lowerCamelCase : Any = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
# class_queries_logits
_lowerCamelCase : List[Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_lowerCamelCase : str = torch.tensor(
[
[1.6512E00, -5.2572E00, -3.3519E00],
[3.6169E-02, -5.9025E00, -2.9313E00],
[1.0766E-04, -7.7630E00, -5.1263E00],
] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Any = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
.to(__lowerCAmelCase )
.eval()
)
_lowerCamelCase : Tuple = self.default_image_processor
_lowerCamelCase : Tuple = prepare_img()
_lowerCamelCase : Optional[Any] = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase )
_lowerCamelCase : List[Any] = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
_lowerCamelCase : Optional[Any] = model(**__lowerCAmelCase )
# masks_queries_logits
_lowerCamelCase : List[Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_lowerCamelCase : int = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]]
_lowerCamelCase : List[Any] = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
# class_queries_logits
_lowerCamelCase : Dict = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_lowerCamelCase : Any = torch.tensor(
[[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : str = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(__lowerCAmelCase )
.eval()
)
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : List[str] = image_processor(
[np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors='''pt''' , )
_lowerCamelCase : Union[str, Any] = inputs['''pixel_values'''].to(__lowerCAmelCase )
_lowerCamelCase : Dict = [el.to(__lowerCAmelCase ) for el in inputs['''mask_labels''']]
_lowerCamelCase : Optional[Any] = [el.to(__lowerCAmelCase ) for el in inputs['''class_labels''']]
with torch.no_grad():
_lowerCamelCase : Tuple = model(**__lowerCAmelCase )
self.assertTrue(outputs.loss is not None )
| 72 | 1 |
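# Hedged sketch of what the two logits tensors checked above combine into: MaskFormer's
# semantic-segmentation rule (softmax over classes minus the null class, sigmoid over
# masks, then an einsum) as described in the paper; the exact post-processing helper in
# transformers may differ in details.
import torch

batch, queries, num_labels, h, w = 1, 100, 150, 8, 8
class_queries_logits = torch.randn(batch, queries, num_labels + 1)  # +1 for the null class
masks_queries_logits = torch.randn(batch, queries, h, w)

masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1]  # drop the null class
masks_probs = masks_queries_logits.sigmoid()
segmentation = torch.einsum("bqc,bqhw->bchw", masks_classes, masks_probs)
assert segmentation.shape == (batch, num_labels, h, w)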
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowercase_ ( lowercase ):
'''simple docstring'''
__snake_case = ['''image_processor''', '''tokenizer''']
__snake_case = '''LayoutLMv3ImageProcessor'''
__snake_case = ('''LayoutLMv3Tokenizer''', '''LayoutLMv3TokenizerFast''')
def __init__( self : Tuple , __UpperCAmelCase : int=None , __UpperCAmelCase : Dict=None , **__UpperCAmelCase : Optional[Any] ) ->Tuple:
"""simple docstring"""
a = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __UpperCAmelCase , )
a = kwargs.pop('''feature_extractor''' )
a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
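
    # --- Added usage sketch (not from the original file) ---
    # This looks like a LayoutLMv3-style processor (its inputs include both
    # `bbox` and `pixel_values`); the checkpoint name and image path below are
    # assumptions for illustration and running this needs network access:
    #
    # from transformers import LayoutLMv3Processor
    # from PIL import Image
    #
    # processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")  # assumed checkpoint
    # image = Image.open("document.png").convert("RGB")  # hypothetical local file
    # encoding = processor(image, return_tensors="pt")
    # print(list(encoding.keys()))  # input_ids, attention_mask, bbox, pixel_values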
| 26 |
from math import ceil, sqrt
def solution(limit: int = 1_000_000) -> int:
    """Count the square laminae that can be formed with up to `limit` tiles."""
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # The hole width must have the same parity as the outer width.
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer


if __name__ == "__main__":
    print(f"{solution() = }")
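

# Added cross-check (illustrative): count laminae directly for a small limit.
# A lamina with outer width w and hole width h (same parity, h >= 1) uses
# w**2 - h**2 tiles, so for small limits this must agree with solution(limit).
def solution_bruteforce(limit: int = 100) -> int:
    count = 0
    outer_width = 3
    while 4 * outer_width - 4 <= limit:  # thinnest lamina for this width still fits
        for hole_width in range(outer_width - 2, 0, -2):
            if outer_width**2 - hole_width**2 <= limit:
                count += 1
        outer_width += 1
    return count


if __name__ == "__main__":
    assert solution_bruteforce(100) == solution(100)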
| 26 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
# we split up the matrix of each encoder layer into queries, keys and values
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the timm model's weights to our ViT structure.
    """
    # define default ViT configuration
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_patch16_224""",
type=str,
help="""Name of the ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
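
# Added example invocation (the timm model name is the script's default above;
# the script filename and output path are illustrative assumptions):
#   python convert_vit_timm_to_pytorch.py --vit_name vit_base_patch16_224 \
#       --pytorch_dump_folder_path ./vit-base-patch16-224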
| 289 | """simple docstring"""
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent):
    # Return True if the sink can still be reached in the residual graph.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def mincut(graph, source, sink):
    """Ford-Fulkerson with BFS (Edmonds-Karp); returns the saturated edges of the minimum cut."""
    parent = [-1] * (len(graph))  # This array is filled by BFS to store the path.
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original capacities, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))

    return res


if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
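

# Added sanity check (tiny 3-node chain): max flow saturates only the
# bottleneck capacity-5 edge, so the minimum cut is [(1, 2)].
if __name__ == "__main__":
    tiny_graph = [
        [0, 10, 0],
        [0, 0, 5],
        [0, 0, 0],
    ]
    assert mincut(tiny_graph, source=0, sink=2) == [(1, 2)]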
| 289 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
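
    @slow
    def test_small_integration_score_decomposition(self):
        # Added sketch (same checkpoint as above, which must be downloaded):
        # recomputing the negative log-likelihood token by token shows that
        # `mtf_score` above equals the summed sequence log-likelihood.
        import torch.nn.functional as F

        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small").to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids.to(torch_device)
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids.to(torch_device)

        logits = model(input_ids, labels=labels).logits
        per_token_nll = F.cross_entropy(logits.squeeze(0), labels.squeeze(0), reduction="none")
        score = -per_token_nll.sum().item()  # equals -(labels.shape[-1] * mean loss)
        self.assertAlmostEqual(score, -(labels.shape[-1] * per_token_nll.mean().item()), places=4)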
| 145 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/mbart-large-en-ro': 1024,
'facebook/mbart-large-cc25': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on
class MBartTokenizer(PreTrainedTokenizer):
    """Construct an mBART tokenizer based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target language setting. No prefix and suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
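

# Added usage sketch (downloads the checkpoint named in the pretrained map above):
if __name__ == "__main__":
    tokenizer = MBartTokenizer.from_pretrained(
        "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
    )
    batch = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
    # mBART appends </s> followed by the source language code to the input ids.
    print(batch["input_ids"])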
| 145 | 1 |
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    """
    Print the first-order entropy, the second-order entropy and their difference.
    """
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)

    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0.0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0.0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[dict, dict]:
    """
    Convert the text into two dicts of counts: single characters and
    consecutive character pairs.
    """
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main() -> None:
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
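
    # Added demonstration: first-order entropy, second-order entropy and their
    # difference for a short lowercase pangram.
    calculate_prob("the quick brown fox jumps over the lazy dog")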
| 73 |
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, filename: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        filename = quote(filename)
    return hfh.hf_hub_url(repo_id, filename, repo_type="dataset", revision=revision)
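

# Added example (repo name, file path and revision are illustrative assumptions):
if __name__ == "__main__":
    print(hf_hub_url("squad", "plain_text/train-00000-of-00001.parquet", revision="refs/convert/parquet"))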
| 73 | 1 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""xlnet-base-cased""": None,
"""xlnet-large-cased""": None,
}
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer(PreTrainedTokenizer):
    """Construct an XLNet tokenizer based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
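

# Added usage sketch (checkpoint name taken from the pretrained map above;
# running it downloads the SentencePiece model):
if __name__ == "__main__":
    tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
    print(tokenizer.tokenize("Hello, how are you?"))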
| 48 |
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}

CONFIG_MAP = {
"""b0""": {
"""hidden_dim""": 1280,
"""width_coef""": 1.0,
"""depth_coef""": 1.0,
"""image_size""": 224,
"""dropout_rate""": 0.2,
"""dw_padding""": [],
},
"""b1""": {
"""hidden_dim""": 1280,
"""width_coef""": 1.0,
"""depth_coef""": 1.1,
"""image_size""": 240,
"""dropout_rate""": 0.2,
"""dw_padding""": [16],
},
"""b2""": {
"""hidden_dim""": 1408,
"""width_coef""": 1.1,
"""depth_coef""": 1.2,
"""image_size""": 260,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 8, 16],
},
"""b3""": {
"""hidden_dim""": 1536,
"""width_coef""": 1.2,
"""depth_coef""": 1.4,
"""image_size""": 300,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 18],
},
"""b4""": {
"""hidden_dim""": 1792,
"""width_coef""": 1.4,
"""depth_coef""": 1.8,
"""image_size""": 380,
"""dropout_rate""": 0.4,
"""dw_padding""": [6],
},
"""b5""": {
"""hidden_dim""": 2048,
"""width_coef""": 1.6,
"""depth_coef""": 2.2,
"""image_size""": 456,
"""dropout_rate""": 0.4,
"""dw_padding""": [13, 27],
},
"""b6""": {
"""hidden_dim""": 2304,
"""width_coef""": 1.8,
"""depth_coef""": 2.6,
"""image_size""": 528,
"""dropout_rate""": 0.5,
"""dw_padding""": [31],
},
"""b7""": {
"""hidden_dim""": 2560,
"""width_coef""": 2.0,
"""depth_coef""": 3.1,
"""image_size""": 600,
"""dropout_rate""": 0.5,
"""dw_padding""": [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
# here we list all keys to be renamed (original name on the left, our name on the right)
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(('stem_conv/kernel:0', 'embeddings.convolution.weight') )
rename_keys.append(('stem_bn/gamma:0', 'embeddings.batchnorm.weight') )
rename_keys.append(('stem_bn/beta:0', 'embeddings.batchnorm.bias') )
rename_keys.append(('stem_bn/moving_mean:0', 'embeddings.batchnorm.running_mean') )
rename_keys.append(('stem_bn/moving_variance:0', 'embeddings.batchnorm.running_var') )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((f"""block{b}_expand_conv/kernel:0""", f"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((f"""block{b}_expand_bn/gamma:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((f"""block{b}_expand_bn/beta:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(f"""block{b}_expand_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(f"""block{b}_expand_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(f"""block{b}_dwconv/depthwise_kernel:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((f"""block{b}_bn/gamma:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((f"""block{b}_bn/beta:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(f"""block{b}_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(f"""block{b}_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((f"""block{b}_se_reduce/kernel:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((f"""block{b}_se_reduce/bias:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((f"""block{b}_se_expand/kernel:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((f"""block{b}_se_expand/bias:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(f"""block{b}_project_conv/kernel:0""", f"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((f"""block{b}_project_bn/gamma:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((f"""block{b}_project_bn/beta:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(f"""block{b}_project_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(f"""block{b}_project_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(('top_conv/kernel:0', 'encoder.top_conv.weight') )
rename_keys.append(('top_bn/gamma:0', 'encoder.top_bn.weight') )
rename_keys.append(('top_bn/beta:0', 'encoder.top_bn.bias') )
rename_keys.append(('top_bn/moving_mean:0', 'encoder.top_bn.running_mean') )
rename_keys.append(('top_bn/moving_variance:0', 'encoder.top_bn.running_var') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    """
    Copy/paste/tweak the Keras model's weights to our EfficientNet structure.
    """
    # Load the original Keras model
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
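
# Added example invocation (the script filename is an illustrative assumption):
#   python convert_efficientnet_to_pytorch.py --model_name b0 \
#       --pytorch_dump_folder_path hf_model --save_model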
| 48 | 1 |
import argparse
from t5x import checkpoints

from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM
def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    t5x_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)

    split_mlp_wi = "wi_0" in t5x_model["target"]["encoder"]["layers_0"]["mlp"]

    if config.model_type == "t5":
        encoder_attn_name = "SelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "local":
        encoder_attn_name = "LocalSelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        encoder_attn_name = "TransientGlobalSelfAttention"
    else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5'` with"
            " `encoder_attention_type` attribute with a value from ['local', 'transient-global']."
        )

    # Encoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            t5x_global_layer_norm = t5x_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]

        # Layer Normalization
        t5x_attention_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]

        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index)]["layer"]
        flax_model_encoder_layer_block["0"][encoder_attn_name]["k"]["kernel"] = t5x_attention_key
        flax_model_encoder_layer_block["0"][encoder_attn_name]["o"]["kernel"] = t5x_attention_out
        flax_model_encoder_layer_block["0"][encoder_attn_name]["q"]["kernel"] = t5x_attention_query
        flax_model_encoder_layer_block["0"][encoder_attn_name]["v"]["kernel"] = t5x_attention_value

        flax_model_encoder_layer_block["0"]["layer_norm"]["weight"] = t5x_attention_layer_norm

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            flax_model_encoder_layer_block["0"][encoder_attn_name]["global_input_layer_norm"][
                "weight"
            ] = t5x_global_layer_norm

        if split_mlp_wi:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi

        flax_model_encoder_layer_block["1"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_encoder_layer_block["1"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

        flax_model.params["encoder"]["block"][str(layer_index)]["layer"] = flax_model_encoder_layer_block

    # Only for layer 0:
    t5x_encoder_rel_embedding = t5x_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["relative_attention_bias"][
        "embedding"
    ] = t5x_encoder_rel_embedding

    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        t5x_encoder_global_rel_embedding = t5x_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
        flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name][
            "global_relative_attention_bias"
        ]["embedding"] = t5x_encoder_global_rel_embedding

    # Assigning
    t5x_encoder_norm = t5x_model["target"]["encoder"]["encoder_norm"]["scale"]
    flax_model.params["encoder"]["final_layer_norm"]["weight"] = t5x_encoder_norm

    # Decoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]

        # Layer Normalization
        t5x_pre_attention_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
            "scale"
        ]

        # Encoder-Decoder-Attention
        t5x_enc_dec_attention_module = t5x_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
        t5x_enc_dec_attention_key = t5x_enc_dec_attention_module["key"]["kernel"]
        t5x_enc_dec_attention_out = t5x_enc_dec_attention_module["out"]["kernel"]
        t5x_enc_dec_attention_query = t5x_enc_dec_attention_module["query"]["kernel"]
        t5x_enc_dec_attention_value = t5x_enc_dec_attention_module["value"]["kernel"]

        # Layer Normalization
        t5x_cross_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]

        # MLP
        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index)]["layer"]
        flax_model_decoder_layer_block["0"]["SelfAttention"]["k"]["kernel"] = t5x_attention_key
        flax_model_decoder_layer_block["0"]["SelfAttention"]["o"]["kernel"] = t5x_attention_out
        flax_model_decoder_layer_block["0"]["SelfAttention"]["q"]["kernel"] = t5x_attention_query
        flax_model_decoder_layer_block["0"]["SelfAttention"]["v"]["kernel"] = t5x_attention_value

        flax_model_decoder_layer_block["0"]["layer_norm"]["weight"] = t5x_pre_attention_layer_norm

        flax_model_decoder_layer_block["1"]["EncDecAttention"]["k"]["kernel"] = t5x_enc_dec_attention_key
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["o"]["kernel"] = t5x_enc_dec_attention_out
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["q"]["kernel"] = t5x_enc_dec_attention_query
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["v"]["kernel"] = t5x_enc_dec_attention_value

        flax_model_decoder_layer_block["1"]["layer_norm"]["weight"] = t5x_cross_layer_norm

        if split_mlp_wi:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi

        flax_model_decoder_layer_block["2"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_decoder_layer_block["2"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

        flax_model.params["decoder"]["block"][str(layer_index)]["layer"] = flax_model_decoder_layer_block

    # Decoder Normalization
    t5x_decoder_norm = t5x_model["target"]["decoder"]["decoder_norm"]["scale"]
    flax_model.params["decoder"]["final_layer_norm"]["weight"] = t5x_decoder_norm

    # Only for layer 0:
    t5x_decoder_rel_embedding = t5x_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["decoder"]["block"]["0"]["layer"]["0"]["SelfAttention"]["relative_attention_bias"][
        "embedding"
    ] = t5x_decoder_rel_embedding

    # Token Embeddings
    t5x_token_embeddings = t5x_model["target"]["token_embedder"]["embedding"]
    flax_model.params["shared"]["embedding"] = t5x_token_embeddings

    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in t5x_model["target"]["decoder"]:
        flax_model.params["lm_head"]["kernel"] = t5x_model["target"]["decoder"]["logits_dense"]["kernel"]

    flax_model.save_pretrained(flax_dump_folder_path)
    print("T5X Model was successfully converted!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path the T5X checkpoint.'''
)
parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''')
parser.add_argument(
'''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.'''
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
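
# Added example invocation (paths and config name are illustrative assumptions):
#   python convert_t5x_checkpoint_to_flax.py --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --config_name google/t5-v1_1-small --flax_dump_folder_path ./t5-v1_1-small-flax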
| 314 |
'''simple docstring'''
def perfect_cube(n: int) -> bool:
    val = n ** (1 / 3)
    return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
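

# Added note: float cube roots can be inexact for larger n (for example,
# 343 ** (1 / 3) evaluates to 6.999999999999999), so a rounding-based variant
# is more robust:
def perfect_cube_exact(n: int) -> bool:
    root = round(abs(n) ** (1 / 3))
    return root**3 == abs(n)


if __name__ == "__main__":
    print(perfect_cube_exact(343))  # True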
| 63 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class GLPNConfig(PretrainedConfig):
    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
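

# Added usage sketch: instantiate the configuration with the defaults from
# the __init__ signature above and inspect a couple of hyper-parameters.
if __name__ == "__main__":
    config = GLPNConfig()
    print(config.hidden_sizes, config.decoder_hidden_size)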
| 49 |
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    """Solve A.x = b iteratively, starting from `init_val`."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )

    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


# Checks if the given matrix is strictly diagonally dominant
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
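
    # Added worked example: a strictly diagonally dominant 2x2 system
    #   5x + 1y = 11
    #   2x + 7y = 11
    # whose exact solution is x = 2, y = 1; the iterates converge toward it.
    coefficient = np.array([[5.0, 1.0], [2.0, 7.0]])
    constant = np.array([[11.0], [11.0]])
    print(jacobi_iteration_method(coefficient, constant, [0, 0], iterations=25))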
| 49 | 1 |
'''simple docstring'''
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)

    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
            )
    else:
        checkpointing_steps = None

    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]

    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}

    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]

    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id
    )

    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset(
        [file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id
    )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
# Freezing the base model
for param in model.parameters():
__a : str = False
for param in model.get_classifier().parameters():
__a : List[Any] = True
# We normalize the batches of images to be a bit faster.
__a : List[Any] = torch.tensor(model.default_cfg['mean'] )[None, :, None, None].to(accelerator.device )
__a : Dict = torch.tensor(model.default_cfg['std'] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
__a : Any = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
__a : Union[str, Any] = OneCycleLR(optimizer=_SCREAMING_SNAKE_CASE , max_lr=_SCREAMING_SNAKE_CASE , epochs=_SCREAMING_SNAKE_CASE , steps_per_epoch=len(_SCREAMING_SNAKE_CASE ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__a , __a , __a , __a , __a : List[str] = accelerator.prepare(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# We need to keep track of how many total steps we have iterated over
__a : List[Any] = 0
# We also need to keep track of the starting epoch so files are named properly
__a : Tuple = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F"""Resumed from checkpoint: {args.resume_from_checkpoint}""" )
accelerator.load_state(args.resume_from_checkpoint )
__a : List[Any] = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
__a : Any = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
__a : Optional[Any] = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
__a : Optional[int] = os.path.splitext(_SCREAMING_SNAKE_CASE )[0]
if "epoch" in training_difference:
__a : Optional[int] = int(training_difference.replace('epoch_' , '' ) ) + 1
__a : str = None
else:
__a : str = int(training_difference.replace('step_' , '' ) )
__a : Union[str, Any] = resume_step // len(_SCREAMING_SNAKE_CASE )
resume_step -= starting_epoch * len(_SCREAMING_SNAKE_CASE )
# Now we train the model
for epoch in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
model.train()
if args.with_tracking:
__a : Dict = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
__a : int = accelerator.skip_first_batches(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
__a : Any = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
__a : int = {k: v.to(accelerator.device ) for k, v in batch.items()}
__a : Tuple = (batch['image'] - mean) / std
__a : List[str] = model(_SCREAMING_SNAKE_CASE )
__a : List[Any] = torch.nn.functional.cross_entropy(_SCREAMING_SNAKE_CASE , batch['label'] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(_SCREAMING_SNAKE_CASE )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__a : Optional[int] = F"""step_{overall_step}"""
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
__a : List[Any] = os.path.join(args.output_dir , _SCREAMING_SNAKE_CASE )
accelerator.save_state(_SCREAMING_SNAKE_CASE )
model.eval()
__a : str = 0
__a : List[Any] = 0
for step, batch in enumerate(_SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
__a : Dict = {k: v.to(accelerator.device ) for k, v in batch.items()}
__a : Tuple = (batch['image'] - mean) / std
with torch.no_grad():
__a : List[Any] = model(_SCREAMING_SNAKE_CASE )
__a : str = outputs.argmax(dim=-1 )
__a , __a : Dict = accelerator.gather_for_metrics((predictions, batch['label']) )
__a : Optional[int] = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
__a : int = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}: {100 * eval_metric:.2f}""" )
if args.with_tracking:
accelerator.log(
{
'accuracy': 100 * eval_metric,
'train_loss': total_loss.item() / len(_SCREAMING_SNAKE_CASE ),
'epoch': epoch,
} , step=_SCREAMING_SNAKE_CASE , )
if checkpointing_steps == "epoch":
__a : str = F"""epoch_{epoch}"""
if args.output_dir is not None:
__a : Tuple = os.path.join(args.output_dir , _SCREAMING_SNAKE_CASE )
accelerator.save_state(_SCREAMING_SNAKE_CASE )
if args.with_tracking:
accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location of where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)
if __name__ == "__main__":
main()
| 27 |
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class _UpperCAmelCase :
@staticmethod
def lowerCamelCase ( *__UpperCamelCase :List[Any] , **__UpperCamelCase :List[Any] ):
pass
def hashimage(image):
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
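# Illustrative usage (added sketch, assumes Pillow is available):
#     hashimage(Image.new("RGB", (4, 4), "red"))  # -> stable 32-char hex digest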
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
UpperCamelCase = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :List[str] , __UpperCamelCase :Optional[int] ):
A = DepthEstimationPipeline(model=__UpperCamelCase , image_processor=__UpperCamelCase )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowerCamelCase ( self :Dict , __UpperCamelCase :Optional[int] , __UpperCamelCase :Optional[Any] ):
A = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png" )
self.assertEqual({"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )} , __UpperCamelCase )
import datasets
A = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
A = depth_estimator(
[
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
] )
self.assertEqual(
[
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
] , __UpperCamelCase , )
@require_tf
@unittest.skip("Depth estimation is not implemented in TF" )
def lowerCamelCase ( self :Optional[Any] ):
pass
@slow
@require_torch
def lowerCamelCase ( self :Optional[Any] ):
A = "Intel/dpt-large"
A = pipeline("depth-estimation" , model=__UpperCamelCase )
A = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" )
A = hashimage(outputs["depth"] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ) , 2.662 )
@require_torch
def lowerCamelCase ( self :Optional[Any] ):
# This is highly irregular to have no small tests.
self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT" )
| 292 | 0 |
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
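# Illustrative usage (added note): floats_list((2, 3)) returns a 2x3 nested list
# of floats drawn uniformly from [0, scale) using the module-level RNG above.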
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict(self):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None
    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)
def lowercase__ ( self : str ) -> str:
"""simple docstring"""
__snake_case : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : Optional[int] = feat_extract_first.save_pretrained(__magic_name__ )[0]
check_json_file_has_correct_format(__magic_name__ )
__snake_case : List[Any] = self.feature_extraction_class.from_pretrained(__magic_name__ )
__snake_case : int = feat_extract_first.to_dict()
__snake_case : str = feat_extract_second.to_dict()
__snake_case : str = feat_extract_first.mel_filters
__snake_case : Union[str, Any] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(__magic_name__ , __magic_name__ ) )
self.assertEqual(__magic_name__ , __magic_name__ )
def lowercase__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__snake_case : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : List[str] = os.path.join(__magic_name__ , """feat_extract.json""" )
feat_extract_first.to_json_file(__magic_name__ )
__snake_case : Union[str, Any] = self.feature_extraction_class.from_json_file(__magic_name__ )
__snake_case : Dict = feat_extract_first.to_dict()
__snake_case : List[str] = feat_extract_second.to_dict()
__snake_case : int = feat_extract_first.mel_filters
__snake_case : List[str] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(__magic_name__ , __magic_name__ ) )
self.assertEqual(__magic_name__ , __magic_name__ )
def lowercase__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
__snake_case : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__snake_case : str = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
__snake_case : Union[str, Any] = [np.asarray(__magic_name__ ) for speech_input in speech_inputs]
# Test feature size
__snake_case : Tuple = feature_extractor(__magic_name__ , padding="""max_length""" , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
__snake_case : List[str] = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
__snake_case : str = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) )
# Test batched
__snake_case : Tuple = feature_extractor(__magic_name__ , return_tensors="""np""" ).input_features
__snake_case : int = feature_extractor(__magic_name__ , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(__magic_name__ , __magic_name__ ):
self.assertTrue(np.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
__snake_case : Optional[Any] = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
__snake_case : int = np.asarray(__magic_name__ )
__snake_case : Union[str, Any] = feature_extractor(__magic_name__ , return_tensors="""np""" ).input_features
__snake_case : Tuple = feature_extractor(__magic_name__ , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(__magic_name__ , __magic_name__ ):
self.assertTrue(np.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) )
# Test truncation required
__snake_case : Optional[int] = [floats_list((1, x) )[0] for x in range(2_00 , (feature_extractor.n_samples + 5_00) , 2_00 )]
__snake_case : List[str] = [np.asarray(__magic_name__ ) for speech_input in speech_inputs]
__snake_case : str = [x[: feature_extractor.n_samples] for x in speech_inputs]
__snake_case : List[str] = [np.asarray(__magic_name__ ) for speech_input in speech_inputs_truncated]
__snake_case : Optional[Any] = feature_extractor(__magic_name__ , return_tensors="""np""" ).input_features
__snake_case : Any = feature_extractor(__magic_name__ , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(__magic_name__ , __magic_name__ ):
self.assertTrue(np.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) )
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
def lowercase__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__snake_case : Union[str, Any] = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
__snake_case : List[str] = self._load_datasamples(1 )
__snake_case : List[str] = WhisperFeatureExtractor()
__snake_case : int = feature_extractor(__magic_name__ , return_tensors="""pt""" ).input_features
self.assertEqual(input_features.shape , (1, 80, 30_00) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , __magic_name__ , atol=1E-4 ) )
def lowercase__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
__snake_case : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__snake_case : str = self._load_datasamples(1 )[0]
__snake_case : int = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_55_35 # Rescale to [0, 65535] to show issue
__snake_case : List[str] = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=__magic_name__ )[0]
self.assertTrue(np.all(np.mean(__magic_name__ ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(__magic_name__ ) - 1 ) < 1E-3 ) )
| 13 |
'''simple docstring'''
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 13 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 252 | """simple docstring"""
import argparse


CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version):
    """Update the version table in the custom.js file."""
    with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
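# Example invocation (added sketch; the script file name here is hypothetical):
#     python update_custom_js.py --version 4.28.0
# appends '    "v4.28.0": "v4.28.0",' to the version mapping in custom.js.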
| 98 | 0 |
import doctest
from collections import deque

import numpy as np


class CircularConvolution:
    """Perform circular convolution of two discrete signals via matrix multiplication."""

    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)

        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]


if __name__ == "__main__":
    doctest.testmod()
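    # Illustrative usage (added example, not in the original file): circular
    # convolution of [2, 1, 2, -1] with [1, 2, 3, 4] prints [10.0, 10.0, 6.0, 14.0].
    print(CircularConvolution().circular_convolution())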
| 165 | from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_7, 3_6_6, 4_3_8, 5_3_2, 6_8_5,
7_0_5, 7_9_6, 9_3_0, 1_0_5_8, 1_2_2_0, 1_2_6_7, 1_2_7_9, 1_3_0_3, 1_3_4_3, 1_3_7_7,
1_3_9_1, 1_6_3_5, 1_7_8_2, 1_8_7_5, 2_1_6_2, 2_3_6_1, 2_4_8_8, 3_4_6_7, 4_0_0_8, 4_2_1_1,
4_6_0_0, 4_8_0_8, 5_2_9_9, 5_8_5_5, 6_3_2_9, 7_2_0_3, 9_6_0_9, 9_9_5_9, 1_0_5_6_3, 1_0_7_8_6,
1_1_4_2_0, 1_1_7_0_9, 1_1_9_0_7, 1_3_1_6_3, 1_3_6_9_7, 1_3_7_0_0, 1_4_8_0_8, 1_5_3_0_6, 1_6_4_1_0, 1_6_7_9_1,
1_7_9_9_2, 1_9_2_0_3, 1_9_5_1_0, 2_0_7_2_4, 2_2_3_0_5, 2_2_9_3_5, 2_7_0_0_7, 3_0_1_0_9, 3_0_4_2_0, 3_3_4_0_9,
3_4_9_4_9, 4_0_2_8_3, 4_0_4_9_3, 4_0_5_4_9, 4_7_2_8_2, 4_9_1_4_6, 5_0_2_5_7, 5_0_3_5_9, 5_0_3_6_0, 5_0_3_6_1
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_9, 5_0_3, 5_2_2, 5_4_2, 8_7_3,
8_9_3, 9_0_2, 9_1_8, 9_2_2, 9_3_1, 1_3_5_0, 1_8_5_3, 1_9_8_2, 2_4_6_0, 2_6_2_7,
3_2_4_6, 3_2_5_3, 3_2_6_8, 3_5_3_6, 3_8_4_6, 3_9_6_1, 4_1_8_3, 4_6_6_7, 6_5_8_5, 6_6_4_7,
7_2_7_3, 9_0_6_1, 9_3_8_3, 1_0_4_2_8, 1_0_9_2_9, 1_1_9_3_8, 1_2_0_3_3, 1_2_3_3_1, 1_2_5_6_2, 1_3_7_9_3,
1_4_1_5_7, 1_4_6_3_5, 1_5_2_6_5, 1_5_6_1_8, 1_6_5_5_3, 1_6_6_0_4, 1_8_3_6_2, 1_8_9_5_6, 2_0_0_7_5, 2_1_6_7_5,
2_2_5_2_0, 2_6_1_3_0, 2_6_1_6_1, 2_6_4_3_5, 2_8_2_7_9, 2_9_4_6_4, 3_1_6_5_0, 3_2_3_0_2, 3_2_4_7_0, 3_6_8_6_5,
4_2_8_6_3, 4_7_4_2_5, 4_9_8_7_0, 5_0_2_5_4, 5_0_2_5_8, 5_0_3_6_0, 5_0_3_6_1, 5_0_3_6_2
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
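
# Illustrative usage (added sketch, not part of the original module):
#     from transformers import WhisperConfig
#     config = WhisperConfig()
#     assert config.hidden_size == config.d_model  # "hidden_size" resolves via attribute_map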
| 165 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}


class Swinv2Config(PretrainedConfig):
    """Configuration class to store the configuration of a Swin Transformer v2 model."""

    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
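
# Illustrative usage (added sketch, not part of the original module):
#     from transformers import Swinv2Config
#     config = Swinv2Config()
#     assert config.hidden_size == 96 * 2 ** 3  # embed_dim doubles at each of the 4 stages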
| 345 |
from ..utils import DummyObject, requires_backends
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Tuple = ["flax"]
def __init__( self: str ,*lowerCamelCase_: int ,**lowerCamelCase_: List[str] ) -> str:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[Any] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: List[str] ) -> Any:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[int] ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: int ) -> Optional[int]:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Any = ["flax"]
def __init__( self: int ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Tuple ) -> Union[str, Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[int] ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: List[str] ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: Any ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Dict = ["flax"]
def __init__( self: Dict ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: List[Any] ) -> Any:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: List[Any] ) -> str:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: Optional[Any] ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : List[str] = ["flax"]
def __init__( self: str ,*lowerCamelCase_: List[str] ,**lowerCamelCase_: Optional[int] ) -> Union[str, Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Union[str, Any] ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> Any:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: int ,**lowerCamelCase_: Optional[Any] ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : int = ["flax"]
def __init__( self: Dict ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: List[str] ) -> Optional[Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[Any] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Any:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Union[str, Any] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Optional[Any] ) -> str:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[int] = ["flax"]
def __init__( self: str ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Optional[int] ) -> Tuple:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: int ,**lowerCamelCase_: Tuple ) -> List[str]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: str ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: Optional[Any] ) -> Any:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : List[Any] = ["flax"]
def __init__( self: Union[str, Any] ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: int ) -> List[Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Dict ) -> Dict:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Any:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Tuple = ["flax"]
def __init__( self: str ,*lowerCamelCase_: Any ,**lowerCamelCase_: int ) -> Tuple:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: Union[str, Any] ) -> List[str]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: str ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: Dict ) -> Optional[int]:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : str = ["flax"]
def __init__( self: Optional[Any] ,*lowerCamelCase_: str ,**lowerCamelCase_: List[str] ) -> Optional[Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: List[str] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: int ) -> List[str]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: str ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: int ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Union[str, Any] = ["flax"]
def __init__( self: Any ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: Optional[int] ) -> List[str]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[int] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: List[Any] ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Tuple = ["flax"]
def __init__( self: Any ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: Dict ) -> str:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: List[str] ) -> int:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: List[Any] ,*lowerCamelCase_: str ,**lowerCamelCase_: str ) -> Any:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[Any] = ["flax"]
def __init__( self: Dict ,*lowerCamelCase_: int ,**lowerCamelCase_: Optional[Any] ) -> Union[str, Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: int ,**lowerCamelCase_: Tuple ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[Any] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Optional[int] ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[int] = ["flax"]
def __init__( self: List[str] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Dict ) -> int:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Dict ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> Optional[Any]:
requires_backends(cls ,["""flax"""] )
| 345 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_bigbird_pegasus': [
'BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BigBirdPegasusConfig',
'BigBirdPegasusOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
'BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST',
'BigBirdPegasusForCausalLM',
'BigBirdPegasusForConditionalGeneration',
'BigBirdPegasusForQuestionAnswering',
'BigBirdPegasusForSequenceClassification',
'BigBirdPegasusModel',
'BigBirdPegasusPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
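# Illustrative effect (added note): with the lazy module installed above,
# `from transformers.models.bigbird_pegasus import BigBirdPegasusConfig` succeeds
# immediately, while the torch-dependent modeling classes are only imported on
# first attribute access.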
| 94 |
def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """A pangram string contains every letter of the alphabet at least once."""
    frequency = set()
    input_str = input_str.replace(" ", "")  # Replace all the whitespace in our sentence
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - 97] = True
        elif char.isupper():
            flag[ord(char) - 65] = True
    return all(flag)


def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    """Benchmark code comparing the different versions."""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
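    # Illustrative checks (added example, not in the original file): the default
    # sentence is a pangram; a short sentence is not.
    assert is_pangram() and is_pangram_faster() and is_pangram_fastest()
    assert not is_pangram("hello world")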
| 94 | 1 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowerCamelCase : Tuple = get_tests_dir('''fixtures''')
lowerCamelCase : Any = get_tests_dir('''fixtures/dummy_feature_extractor_config.json''')
lowerCamelCase : Dict = get_tests_dir('''fixtures/dummy-config.json''')
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : int ) -> Any:
"""simple docstring"""
__lowercase : Dict = 0
def lowerCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
__lowercase : Dict = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(__a , __a )
def lowerCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase : Optional[int] = AutoFeatureExtractor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
__lowercase : Any = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
__lowercase : Optional[Any] = AutoFeatureExtractor.from_pretrained(__a ).to_dict()
config_dict.pop("""feature_extractor_type""" )
__lowercase : List[Any] = WavaVecaFeatureExtractor(**__a )
# save in new folder
model_config.save_pretrained(__a )
config.save_pretrained(__a )
__lowercase : List[str] = AutoFeatureExtractor.from_pretrained(__a )
# make sure private variable is not incorrectly saved
__lowercase : int = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(__a , __a )
def lowerCAmelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
__lowercase : List[str] = AutoFeatureExtractor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
with self.assertRaisesRegex(
__a , """bert-base is not a local folder and is not a valid model identifier""" ):
__lowercase : int = AutoFeatureExtractor.from_pretrained("""bert-base""" )
def lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
with self.assertRaisesRegex(
__a , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
__lowercase : Dict = AutoFeatureExtractor.from_pretrained(__a , revision="""aaaaaa""" )
def lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
with self.assertRaisesRegex(
__a , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
__lowercase : Union[str, Any] = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" )
def lowerCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
with self.assertRaises(__a ):
__lowercase : List[str] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__a ):
__lowercase : Dict = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=__a )
__lowercase : Any = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=__a )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__a )
__lowercase : Optional[Any] = AutoFeatureExtractor.from_pretrained(__a , trust_remote_code=__a )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
try:
AutoConfig.register("""custom""" , __a )
AutoFeatureExtractor.register(__a , __a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__a ):
AutoFeatureExtractor.register(__a , __a )
# Now that the config is registered, it can be used as any other config with the auto-API
__lowercase : Dict = CustomFeatureExtractor.from_pretrained(__a )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__a )
__lowercase : Optional[Any] = AutoFeatureExtractor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
        class NewFeatureExtractor(CustomFeatureExtractor):
            is_local = True
try:
AutoConfig.register("""custom""" , __a )
AutoFeatureExtractor.register(__a , __a )
# If remote code is not set, the default is to use local
__lowercase : Optional[Any] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
__lowercase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=__a )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
__lowercase : Tuple = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=__a )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(not hasattr(__a , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] | 233 |
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class MaMaaaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="relu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        inputs_dict = prepare_mam_aaa_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def get_config(self):
        return MaMaaaConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            encoder_layerdrop=self.encoder_layerdrop,
            decoder_layerdrop=self.decoder_layerdrop,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = MaMaaaModel(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]
        head_mask = inputs_dict["head_mask"]

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = MaMaaaModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = MaMaaaEncoder.from_pretrained(tmpdirname).to(torch_device)

        encoder_last_hidden_state_2 = encoder(
            inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"]
        )[0]

        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = MaMaaaDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            input_ids=inputs_dict["decoder_input_ids"],
            attention_mask=inputs_dict["decoder_attention_mask"],
            encoder_hidden_states=encoder_last_hidden_state,
            encoder_attention_mask=inputs_dict["attention_mask"],
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class MaMaaaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": MaMaaaForConditionalGeneration,
            "feature-extraction": MaMaaaModel,
            "summarization": MaMaaaForConditionalGeneration,
            "text2text-generation": MaMaaaForConditionalGeneration,
            "translation": MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = MaMaaaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaMaaaConfig)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)

            with torch.no_grad():
                model(**inputs)[0]
    def test_generate_fp16(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        model = MaMaaaForConditionalGeneration(config).eval().to(torch_device)
        if torch_device == "cuda":
            model.half()
        model.generate(input_ids, attention_mask=attention_mask)
        model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)


def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class MaMaaaModelIntegrationTests(unittest.TestCase):
    @cached_property
    def default_tokenizer(self):
        return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M")

    def test_inference_no_head(self):
        model = MaMaaaModel.from_pretrained("facebook/m2m100_418M").to(torch_device)
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, 1024))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))
    def test_inference_head(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)
        # change to intended input
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, model.config.vocab_size))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))
    def test_seq_to_seq_generation(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)
        tokenizer = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")

        src_fr = [
            "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
            "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
            "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
            " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
            " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
        ]

        # The below article tests that we don't add any hypotheses outside of the top n_beams
        dct = tokenizer(src_fr, padding=True, return_tensors="pt")
        hypotheses_batch = model.generate(
            input_ids=dct["input_ids"].to(torch_device),
            attention_mask=dct["attention_mask"].to(torch_device),
            num_beams=5,
            forced_bos_token_id=tokenizer.get_lang_id("en"),
        )
        expected_en = [
            "The NSA case highlights the total absence of intelligence debate",
            "I think there are two levels of response from the French government.",
            "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
            " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
            " communications in France.",
        ]
        generated = tokenizer.batch_decode(
            hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True
        )
        assert generated == expected_en
| 233 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
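
# --- Editor's note: illustrative sketch, not part of the upstream module. ---
# `_LazyModule` above defers the torch-heavy imports in `_import_structure`
# until an attribute is first accessed. A minimal, self-contained sketch of the
# same idea (class and attribute names here are hypothetical, not the real
# transformers API):
import importlib


class _LazyModuleSketch:
    def __init__(self, attr_to_module):
        # maps attribute name -> dotted path of the submodule defining it
        self._attr_to_module = attr_to_module

    def __getattr__(self, name):
        # only runs on the first access; the result is cached on the instance
        module = importlib.import_module(self._attr_to_module[name])
        value = getattr(module, name)
        setattr(self, name, value)
        return value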
| 371 |
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")


CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfigMixin",
    "DecisionTransformerConfigMixin",
    "EncoderDecoderConfigMixin",
    "RagConfigMixin",
    "SpeechEncoderDecoderConfigMixin",
    "VisionEncoderDecoderConfigMixin",
    "VisionTextDualEncoderConfigMixin",
}
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
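

# --- Editor's note: illustrative example, not part of the upstream script. ---
# A quick demonstration of what `_re_checkpoint` extracts; the docstring text
# below is made up for illustration:
def _demo_checkpoint_regex():
    doc = "See [bert-base-uncased](https://huggingface.co/bert-base-uncased) for details."
    # findall returns (checkpoint_name, checkpoint_link) tuples
    assert _re_checkpoint.findall(doc) == [
        ("bert-base-uncased", "https://huggingface.co/bert-base-uncased")
    ]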
| 178 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 9 |
"""simple docstring"""
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = "src/diffusers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")


DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, {1})

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None

    return "_and_".join(backends)
def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects
def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)
def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files
def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
_a = parser.parse_args()
check_dummies(args.fix_and_overwrite)
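

# --- Editor's note: illustrative example, not part of the upstream script. ---
# A small check of what `create_dummy_object` emits; the object names are
# hypothetical and chosen only to hit the three template branches:
def _demo_create_dummy_object():
    backend_name = '["torch"]'
    # mixed case -> class template
    assert create_dummy_object("UNet2DModel", backend_name).startswith(
        "\nclass UNet2DModel(metaclass=DummyObject):"
    )
    # all upper case -> constant template
    assert create_dummy_object("ONNX_WEIGHTS_NAME", backend_name) == "\nONNX_WEIGHTS_NAME = None\n"
    # all lower case -> function template
    assert create_dummy_object("load_model", backend_name).startswith("\ndef load_model(")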
| 61 | 0 |
"""simple docstring"""
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_forward_signature(self):
        pass

    def test_training(self):
        pass
    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
    def test_gradient_checkpointing(self):
        # enable deterministic behavior for gradient checkpointing
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)

        assert not model.is_gradient_checkpointing and model.training

        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()

        assert model_2.is_gradient_checkpointing and model_2.training

        out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))
    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"
    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()

        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)

        image = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ]
            )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(
            model_id,
            subfolder="vae",
            torch_dtype=torch_dtype,
            revision=revision,
        )
        model.to(torch_device).eval()
        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)

        with torch.no_grad():
            sample = model(image).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
            [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)

    @parameterized.expand(
        [
            # fmt: off
            [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
            [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)

    @parameterized.expand([(13,), (16,), (27,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-1)

    @parameterized.expand([(13,), (16,), (37,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
            [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)

        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
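

# --- Editor's note: illustrative sketch, not part of the test suite. ---
# The tests above exercise encode and decode separately; a minimal round trip
# with a randomly initialized VAE (config values chosen to match the dummy
# config used in the unit tests) looks like this:
def _demo_vae_round_trip():
    model = AutoencoderKL(
        block_out_channels=[32, 64],
        in_channels=3,
        out_channels=3,
        down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
        up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
        latent_channels=4,
    )
    image = torch.randn(1, 3, 32, 32)
    with torch.no_grad():
        # two down blocks -> one downsample, so a 32x32 image becomes a 4x16x16 latent
        latents = model.encode(image).latent_dist.sample()
        reconstruction = model.decode(latents).sample
    assert reconstruction.shape == image.shape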
| 68 |
"""simple docstring"""
import enum
import os
from hashlib import sha256
from typing import Optional

from .. import config
from .logging import get_logger

logger = get_logger(__name__)
class VerificationMode(enum.Enum):
    """`Enum` that specifies which verification checks to run."""

    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksums verifications of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum doesn't match the expected checksum."""


def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)


class SplitsVerificationException(Exception):
    """Exceptions during splits verifications."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file is missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The splits sizes don't match the expected splits sizes."""


def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and the sha256 checksum of a file"""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size):
    """Check if `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
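

# --- Editor's note: illustrative example, not part of the upstream module. ---
# A small sketch of how the helpers above fit together; the file name and
# contents are hypothetical:
def _demo_checksum_round_trip(path="demo.bin"):
    with open(path, "wb") as f:
        f.write(b"hello world")
    recorded = {path: get_size_checksum_dict(path)}
    # verifying a file against its own freshly computed checksum passes silently
    verify_checksums(expected_checksums=recorded, recorded_checksums=recorded)
    os.remove(path)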
| 68 | 1 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    """
    A CLI menu to select a choice from a list of choices using the keyboard.
    """

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
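

# --- Editor's note: illustrative usage, not part of the upstream module. ---
# Driving the menu requires an interactive terminal; the prompt and choices
# below are hypothetical:
if __name__ == "__main__":
    menu = BulletMenu("Which framework do you want to use?", ["pytorch", "tensorflow"])
    print(f"Selected index: {menu.run(default_choice=0)}")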
| 193 |
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)
@slow
@require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = """
            --model_type roberta
            --model_name_or_path roberta-base
            --task_name MRPC
            --do_train
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --max_seq_length 128
            --per_gpu_eval_batch_size=1
            --per_gpu_train_batch_size=8
            --learning_rate 2e-4
            --num_train_epochs 3
            --overwrite_output_dir
            --seed 42
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --save_steps 0
            --overwrite_cache
            --eval_after_first_stage
            """.split()
        self.run_and_check(train_args)

        eval_args = """
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --eval_each_highway
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(eval_args)

        entropy_eval_args = """
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --early_exit_entropy 0.1
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(entropy_eval_args)
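

# --- Editor's note: illustrative sketch, not part of the test file. ---
# The pattern used in `run_and_check` is worth isolating: a script's `main()`
# can be tested in-process by temporarily swapping `sys.argv`. A self-contained
# sketch with a toy argument parser (names hypothetical):
def _demo_argv_patching():
    def toy_main():
        parser = argparse.ArgumentParser()
        parser.add_argument("--seed", type=int)
        return parser.parse_args().seed

    with patch.object(sys, "argv", ["toy_script.py", "--seed", "42"]):
        assert toy_main() == 42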
| 193 | 1 |
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=5,
        help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cpu",
        help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()
    return args
def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )

    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)

    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length

    if args.num_beams:
        num_beams = args.num_beams

    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
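

# --- Editor's note: illustrative usage, not part of the upstream script. ---
# The script is normally driven from the command line, roughly (file name and
# values hypothetical):
#
#   python run_onnx_exporter.py --model_name_or_path facebook/bart-base \
#       --num_beams 4 --max_length 5 --output_file_path bart.onnx
#
# The same flow can also be invoked programmatically:
def _demo_programmatic_export():
    model, tokenizer = load_model_tokenizer("facebook/bart-base", torch.device("cpu"))
    export_and_validate_model(model, tokenizer, "bart.onnx", num_beams=4, max_length=5)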
| 233 |
"""simple docstring"""
from collections import defaultdict
from math import gcd


def solution(limit: int = 1_500_000) -> int:
    # Count the perimeters <= `limit` that can be formed by exactly one
    # integer-sided right triangle, generating primitive triples with
    # Euclid's formula (the primitive triple's perimeter is 2m(m + n)).
    frequencies = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
if __name__ == "__main__":
print(f"""{solution() = }""")
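

# --- Editor's note: illustrative check, not part of the upstream solution. ---
# `solution` relies on Euclid's formula: for coprime m > n of opposite parity,
# (m^2 - n^2, 2mn, m^2 + n^2) is a primitive Pythagorean triple whose perimeter
# is 2m(m + n), exactly the quantity the loop steps over:
def _demo_euclid_formula(euclid_m=2, euclid_n=1):
    a = euclid_m**2 - euclid_n**2
    b = 2 * euclid_m * euclid_n
    c = euclid_m**2 + euclid_n**2
    assert a * a + b * b == c * c
    assert a + b + c == 2 * euclid_m * (euclid_m + euclid_n)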
| 233 | 1 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])

        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class __A :
def __init__( self : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : str=13 , UpperCAmelCase_ : Any=7 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : Optional[int]=99 , UpperCAmelCase_ : Dict=32 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : Tuple=4 , UpperCAmelCase_ : Dict=37 , UpperCAmelCase_ : List[str]="gelu" , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : Any=512 , UpperCAmelCase_ : Dict=16 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : Union[str, Any]=0.02 , UpperCAmelCase_ : Optional[int]=3 , UpperCAmelCase_ : str=4 , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Dict=0 , ):
lowerCAmelCase : Any = parent
lowerCAmelCase : int = batch_size
lowerCAmelCase : Optional[int] = seq_length
lowerCAmelCase : Optional[Any] = is_training
lowerCAmelCase : Optional[Any] = use_input_mask
lowerCAmelCase : Union[str, Any] = use_token_type_ids
lowerCAmelCase : Any = use_labels
lowerCAmelCase : Dict = vocab_size
lowerCAmelCase : str = hidden_size
lowerCAmelCase : str = num_hidden_layers
lowerCAmelCase : int = num_attention_heads
lowerCAmelCase : List[Any] = intermediate_size
lowerCAmelCase : str = hidden_act
lowerCAmelCase : Optional[Any] = hidden_dropout_prob
lowerCAmelCase : Any = attention_probs_dropout_prob
lowerCAmelCase : str = max_position_embeddings
lowerCAmelCase : Optional[int] = type_vocab_size
lowerCAmelCase : str = type_sequence_label_size
lowerCAmelCase : int = initializer_range
lowerCAmelCase : Dict = num_labels
lowerCAmelCase : List[Any] = num_choices
lowerCAmelCase : Optional[Any] = scope
lowerCAmelCase : Optional[Any] = projection_dim
def lowercase__ ( self : Any ):
lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : List[Any] = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
lowerCAmelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase : Any = None
if self.use_token_type_ids:
lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase : Tuple = None
lowerCAmelCase : Optional[Any] = None
lowerCAmelCase : str = None
if self.use_labels:
lowerCAmelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase : Dict = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase : Dict = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , )
lowerCAmelCase : Tuple = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str ):
lowerCAmelCase : int = TFDPRContextEncoder(config=UpperCAmelCase_ )
lowerCAmelCase : Any = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
lowerCAmelCase : List[str] = model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
lowerCAmelCase : Tuple = model(UpperCAmelCase_ )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def lowercase__ ( self : Tuple , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any] ):
lowerCAmelCase : List[str] = TFDPRQuestionEncoder(config=UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
lowerCAmelCase : int = model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
lowerCAmelCase : Any = model(UpperCAmelCase_ )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def lowercase__ ( self : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any] ):
lowerCAmelCase : Optional[int] = TFDPRReader(config=UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
def lowercase__ ( self : Any ):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {'input_ids': input_ids}
return config, inputs_dict
@require_tf
class __A ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
lowerCAmelCase_ : int = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
lowerCAmelCase_ : str = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}
lowerCAmelCase_ : Optional[int] = False
lowerCAmelCase_ : Union[str, Any] = False
lowerCAmelCase_ : str = False
lowerCAmelCase_ : List[Any] = False
lowerCAmelCase_ : Dict = False
def lowercase__ ( self : List[str] ):
lowerCAmelCase : Optional[Any] = TFDPRModelTester(self )
lowerCAmelCase : List[Any] = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 )
def lowercase__ ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def lowercase__ ( self : Any ):
lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*UpperCAmelCase_ )
def lowercase__ ( self : Any ):
lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*UpperCAmelCase_ )
def lowercase__ ( self : Dict ):
lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*UpperCAmelCase_ )
@slow
def lowercase__ ( self : List[Any] ):
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : int = TFDPRContextEncoder.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : str = TFDPRContextEncoder.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : Any = TFDPRQuestionEncoder.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : Optional[int] = TFDPRReader.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
@require_tf
class __A ( unittest.TestCase ):
@slow
def lowercase__ ( self : Any ):
lowerCAmelCase : List[str] = TFDPRQuestionEncoder.from_pretrained('facebook/dpr-question_encoder-single-nq-base' )
lowerCAmelCase : List[Any] = tf.constant(
[[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]] ) # [CLS] hello, is my dog cute? [SEP]
lowerCAmelCase : Optional[Any] = model(UpperCAmelCase_ )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
lowerCAmelCase : List[Any] = tf.constant(
[
[
0.03_23_62_53,
0.12_75_33_35,
0.16_81_85_09,
0.00_27_97_86,
0.3_89_69_33,
0.24_26_49_45,
0.2_17_89_71,
-0.02_33_52_27,
-0.08_48_19_59,
-0.14_32_41_17,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1E-4 ) )
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
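# Builds the DETA config: a Swin-Large backbone (embed_dim 192, depths 2/2/18/2) feeding a
# deformable-DETR-style detector with 900 queries and 5 feature levels; the label map depends
# on whether the checkpoint was trained on COCO (91 labels) or Objects365 (366 labels).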
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int ):
lowerCAmelCase = SwinConfig(
embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['stage2', 'stage3', 'stage4'] , )
lowerCAmelCase = DetaConfig(
backbone_config=_UpperCAmelCase , num_queries=900 , encoder_ffn_dim=2048 , decoder_ffn_dim=2048 , num_feature_levels=5 , assign_first_stage=_UpperCAmelCase , with_box_refine=_UpperCAmelCase , two_stage=_UpperCAmelCase , )
# set labels
lowerCAmelCase = 'huggingface/label-files'
if "o365" in model_name:
lowerCAmelCase = 366
lowerCAmelCase = 'object365-id2label.json'
else:
lowerCAmelCase = 91
lowerCAmelCase = 'coco-detection-id2label.json'
lowerCAmelCase = num_labels
lowerCAmelCase = json.load(open(cached_download(hf_hub_url(_UpperCAmelCase , _UpperCAmelCase , repo_type='dataset' ) ) , 'r' ) )
lowerCAmelCase = {int(k ): v for k, v in idalabel.items()}
lowerCAmelCase = idalabel
lowerCAmelCase = {v: k for k, v in idalabel.items()}
return config
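# Maps every key in the original DETA checkpoint to its Hugging Face name: the Swin patch
# embedding and stage blocks first, then the deformable transformer encoder and decoder layers.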
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Tuple ):
lowerCAmelCase = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') )
rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.0.body.layers.{i}.downsample.reduction.weight', F'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.downsample.norm.weight', F'model.backbone.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.downsample.norm.bias', F'model.backbone.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') )
rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') )
rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') )
rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') )
rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') )
rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', F'model.encoder.layers.{i}.self_attn.sampling_offsets.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', F'model.encoder.layers.{i}.self_attn.sampling_offsets.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', F'model.encoder.layers.{i}.self_attn.attention_weights.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', F'model.encoder.layers.{i}.self_attn.attention_weights.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.value_proj.weight', F'model.encoder.layers.{i}.self_attn.value_proj.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.value_proj.bias', F'model.encoder.layers.{i}.self_attn.value_proj.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.output_proj.weight', F'model.encoder.layers.{i}.self_attn.output_proj.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.output_proj.bias', F'model.encoder.layers.{i}.self_attn.output_proj.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.weight', F'model.encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'model.encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'model.encoder.layers.{i}.fc1.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'model.encoder.layers.{i}.fc1.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'model.encoder.layers.{i}.fc2.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'model.encoder.layers.{i}.fc2.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'model.encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'model.encoder.layers.{i}.final_layer_norm.bias') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', F'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', F'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', F'model.decoder.layers.{i}.encoder_attn.attention_weights.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', F'model.decoder.layers.{i}.encoder_attn.attention_weights.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', F'model.decoder.layers.{i}.encoder_attn.value_proj.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', F'model.decoder.layers.{i}.encoder_attn.value_proj.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', F'model.decoder.layers.{i}.encoder_attn.output_proj.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', F'model.decoder.layers.{i}.encoder_attn.output_proj.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.weight', F'model.decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'model.decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'model.decoder.layers.{i}.self_attn.out_proj.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'model.decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm2.weight', F'model.decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm2.bias', F'model.decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'model.decoder.layers.{i}.fc1.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'model.decoder.layers.{i}.fc1.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'model.decoder.layers.{i}.fc2.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'model.decoder.layers.{i}.fc2.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'model.decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'model.decoder.layers.{i}.final_layer_norm.bias') )
# fmt: on
return rename_keys
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : str ):
lowerCAmelCase = dct.pop(_UpperCAmelCase )
lowerCAmelCase = val
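# Swin stores each block's attention projections as a single fused qkv matrix; the helper below
# slices it into the separate query, key and value tensors (in that order) expected by the HF model.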
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] ):
lowerCAmelCase = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
lowerCAmelCase = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
lowerCAmelCase = state_dict.pop(F'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight' )
lowerCAmelCase = state_dict.pop(F'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase = in_proj_weight[:dim, :]
lowerCAmelCase = in_proj_bias[: dim]
lowerCAmelCase = in_proj_weight[
dim : dim * 2, :
]
lowerCAmelCase = in_proj_bias[
dim : dim * 2
]
lowerCAmelCase = in_proj_weight[
-dim :, :
]
lowerCAmelCase = in_proj_bias[-dim :]
# fmt: on
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] , _UpperCAmelCase : Any ):
# transformer decoder self-attention layers
lowerCAmelCase = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
lowerCAmelCase = state_dict.pop(F'transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
lowerCAmelCase = state_dict.pop(F'transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase = in_proj_weight[:hidden_size, :]
lowerCAmelCase = in_proj_bias[:hidden_size]
lowerCAmelCase = in_proj_weight[
hidden_size : hidden_size * 2, :
]
lowerCAmelCase = in_proj_bias[hidden_size : hidden_size * 2]
lowerCAmelCase = in_proj_weight[-hidden_size:, :]
lowerCAmelCase = in_proj_bias[-hidden_size:]
def _SCREAMING_SNAKE_CASE ():
lowerCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCAmelCase = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw )
return im
@torch.no_grad()
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : Any ):
lowerCAmelCase = get_deta_config(_UpperCAmelCase )
# load original state dict
if model_name == "deta-swin-large":
lowerCAmelCase = hf_hub_download(repo_id='nielsr/deta-checkpoints' , filename='adet_swin_ft.pth' )
elif model_name == "deta-swin-large-o365":
lowerCAmelCase = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365' , filename='deta_swin_pt_o365.pth' )
else:
raise ValueError(F'Model name {model_name} not supported' )
lowerCAmelCase = torch.load(_UpperCAmelCase , map_location='cpu' )['model']
# original state dict
for name, param in state_dict.items():
print(_UpperCAmelCase , param.shape )
# rename keys
lowerCAmelCase = create_rename_keys(_UpperCAmelCase )
for src, dest in rename_keys:
rename_key(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
read_in_swin_q_k_v(_UpperCAmelCase , config.backbone_config )
read_in_decoder_q_k_v(_UpperCAmelCase , _UpperCAmelCase )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
lowerCAmelCase = state_dict.pop(_UpperCAmelCase )
lowerCAmelCase = val
if "input_proj" in key:
lowerCAmelCase = state_dict.pop(_UpperCAmelCase )
lowerCAmelCase = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
lowerCAmelCase = state_dict.pop(_UpperCAmelCase )
lowerCAmelCase = val
# finally, create HuggingFace model and load state dict
lowerCAmelCase = DetaForObjectDetection(_UpperCAmelCase )
model.load_state_dict(_UpperCAmelCase )
model.eval()
lowerCAmelCase = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(_UpperCAmelCase )
# load image processor
lowerCAmelCase = DetaImageProcessor(format='coco_detection' )
# verify our conversion on image
lowerCAmelCase = prepare_img()
lowerCAmelCase = processor(images=_UpperCAmelCase , return_tensors='pt' )
lowerCAmelCase = encoding['pixel_values']
lowerCAmelCase = model(pixel_values.to(_UpperCAmelCase ) )
# verify logits
print('Logits:' , outputs.logits[0, :3, :3] )
print('Boxes:' , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
lowerCAmelCase = torch.tensor(
[[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] )
lowerCAmelCase = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] )
elif model_name == "deta-swin-large-o365":
lowerCAmelCase = torch.tensor(
[[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] )
lowerCAmelCase = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(_UpperCAmelCase ) , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(_UpperCAmelCase ) , atol=1e-4 )
print('Everything ok!' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(F'Saving PyTorch model and processor to {pytorch_dump_folder_path}...' )
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
model.save_pretrained(_UpperCAmelCase )
processor.save_pretrained(_UpperCAmelCase )
# Push to hub
if push_to_hub:
print('Pushing model and processor to hub...' )
model.push_to_hub(F'jozhang97/{model_name}' )
processor.push_to_hub(F'jozhang97/{model_name}' )
if __name__ == "__main__":
__UpperCamelCase : int = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__UpperCamelCase : List[Any] = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
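# Illustrative invocation (the script filename here is assumed, not taken from the repo):
#   python convert_deta_swin_to_pytorch.py --model_name deta-swin-large \
#       --pytorch_dump_folder_path ./deta-swin-large --push_to_hub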
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class a :
def __init__( self , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = 13
lowerCAmelCase = 7
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = 99
lowerCAmelCase = 32
lowerCAmelCase = 2
lowerCAmelCase = 4
lowerCAmelCase = 37
lowerCAmelCase = 'gelu'
lowerCAmelCase = 0.1
lowerCAmelCase = 0.1
lowerCAmelCase = 5_12
lowerCAmelCase = 16
lowerCAmelCase = 2
lowerCAmelCase = 0.02
lowerCAmelCase = 3
lowerCAmelCase = 4
lowerCAmelCase = None
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self ):
"""simple docstring"""
(
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = self.prepare_config_and_inputs()
config.is_decoder = True
encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = TFEsmModel(config=_snake_case )
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
lowerCAmelCase = model(_snake_case )
lowerCAmelCase = [input_ids, input_mask]
lowerCAmelCase = model(_snake_case )
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = True
lowerCAmelCase = TFEsmModel(config=_snake_case )
lowerCAmelCase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'encoder_hidden_states': encoder_hidden_states,
'encoder_attention_mask': encoder_attention_mask,
}
lowerCAmelCase = model(_snake_case )
lowerCAmelCase = [input_ids, input_mask]
lowerCAmelCase = model(_snake_case , encoder_hidden_states=_snake_case )
# Also check the case where encoder outputs are not passed
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = TFEsmForMaskedLM(config=_snake_case )
lowerCAmelCase = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.num_labels
lowerCAmelCase = TFEsmForTokenClassification(config=_snake_case )
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class a ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
snake_case__ = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
snake_case__ = (
{
'''feature-extraction''': TFEsmModel,
'''fill-mask''': TFEsmForMaskedLM,
'''text-classification''': TFEsmForSequenceClassification,
'''token-classification''': TFEsmForTokenClassification,
'''zero-shot''': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case__ = False
snake_case__ = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFEsmModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=_snake_case , hidden_size=37 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_snake_case )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = TFEsmModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
@unittest.skip('Protein models do not support embedding resizing.' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('Protein models do not support embedding resizing.' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(_snake_case )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
lowerCAmelCase = model.get_bias()
assert isinstance(_snake_case , _snake_case )
for k, v in name.items():
assert isinstance(_snake_case , tf.Variable )
else:
lowerCAmelCase = model.get_output_embeddings()
assert x is None
lowerCAmelCase = model.get_bias()
assert name is None
@require_tf
class a ( unittest.TestCase ):
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
lowerCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
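# Six token ids in a single batch; ESM-2 uses a 33-token vocabulary, hence the [1, 6, 33] logits shape below.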
lowerCAmelCase = model(_snake_case )[0]
lowerCAmelCase = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , _snake_case )
# compare the actual values for a slice.
lowerCAmelCase = tf.constant(
[
[
[8.921_518, -10.589_814, -6.4_671_307],
[-6.3_967_156, -13.911_377, -1.1_211_915],
[-7.781_247, -13.951_557, -3.740_592],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
lowerCAmelCase = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCAmelCase = model(_snake_case )[0]
# compare the actual values for a slice.
lowerCAmelCase = tf.constant(
[
[
[0.14_443_092, 0.54_125_327, 0.3_247_739],
[0.30_340_484, 0.00_526_676, 0.31_077_722],
[0.32_278_043, -0.24_987_096, 0.3_414_628],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
"""simple docstring"""
from ...utils import OptionalDependencyNotAvailable, is_note_seq_available, is_torch_available, is_transformers_available
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
T5FilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowerCamelCase : Any = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
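# Standard lazy-import layout: _LazyModule (wired up at the bottom of this file) defers the real
# imports until first attribute access, and the torch-backed model classes are only registered
# below when torch is available.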
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Union[str, Any] = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
_lowerCamelCase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class a__( TokenizerTesterMixin , unittest.TestCase ):
lowercase__ = BertTokenizer
lowercase__ = BertTokenizerFast
lowercase__ = True
lowercase__ = True
lowercase__ = filter_non_english
def lowercase_ ( self : Any ):
super().setUp()
a : str = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
a : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def lowercase_ ( self : Optional[int] , __snake_case : Dict ):
a : Dict = 'UNwant\u00E9d,running'
a : Dict = 'unwanted, running'
return input_text, output_text
def lowercase_ ( self : Any ):
a : int = self.tokenizer_class(self.vocab_file )
a : int = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(lowerCAmelCase_ , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [9, 6, 7, 12, 10, 11] )
def lowercase_ ( self : List[str] ):
if not self.test_rust_tokenizer:
return
a : Any = self.get_tokenizer()
a : Dict = self.get_rust_tokenizer()
a : List[Any] = 'UNwant\u00E9d,running'
a : List[str] = tokenizer.tokenize(lowerCAmelCase_ )
a : int = rust_tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
a : List[str] = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
a : Optional[int] = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
a : List[Any] = self.get_rust_tokenizer()
a : Dict = tokenizer.encode(lowerCAmelCase_ )
a : Optional[Any] = rust_tokenizer.encode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# With lower casing
a : List[str] = self.get_tokenizer(do_lower_case=lowerCAmelCase_ )
a : str = self.get_rust_tokenizer(do_lower_case=lowerCAmelCase_ )
a : Optional[int] = 'UNwant\u00E9d,running'
a : Dict = tokenizer.tokenize(lowerCAmelCase_ )
a : List[str] = rust_tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
a : List[Any] = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
a : Optional[Any] = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
a : Tuple = self.get_rust_tokenizer()
a : Optional[int] = tokenizer.encode(lowerCAmelCase_ )
a : Tuple = rust_tokenizer.encode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase_ ( self : int ):
a : Dict = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def lowercase_ ( self : str ):
a : Dict = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def lowercase_ ( self : Optional[int] ):
a : Dict = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def lowercase_ ( self : str ):
a : List[Any] = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def lowercase_ ( self : Union[str, Any] ):
a : Optional[Any] = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def lowercase_ ( self : Optional[int] ):
a : Any = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def lowercase_ ( self : str ):
a : List[Any] = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def lowercase_ ( self : int ):
a : str = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def lowercase_ ( self : Dict ):
a : Tuple = BasicTokenizer(do_lower_case=lowerCAmelCase_ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def lowercase_ ( self : Tuple ):
a : Dict = BasicTokenizer()
a : Optional[int] = 'a\n\'ll !!to?\'d of, can\'t.'
a : List[str] = ['a', '\'', 'll', '!', '!', 'to', '?', '\'', 'd', 'of', ',', 'can', '\'', 't', '.']
self.assertListEqual(tokenizer.tokenize(lowerCAmelCase_ ) , lowerCAmelCase_ )
def lowercase_ ( self : List[Any] ):
a : List[str] = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
a : List[Any] = {}
for i, token in enumerate(lowerCAmelCase_ ):
a : str = i
a : List[Any] = WordpieceTokenizer(vocab=lowerCAmelCase_ , unk_token='[UNK]' )
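# WordPiece greedily matches the longest vocab prefix at each step; a word that cannot be fully
# covered by subwords collapses to the unknown token, as the assertions below check.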
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def lowercase_ ( self : str ):
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def lowercase_ ( self : Tuple ):
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def lowercase_ ( self : List[Any] ):
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def lowercase_ ( self : Tuple ):
a : Union[str, Any] = self.get_tokenizer()
a : int = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(lowerCAmelCase_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
self.assertListEqual(
[rust_tokenizer.tokenize(lowerCAmelCase_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
@slow
def lowercase_ ( self : Dict ):
a : int = self.tokenizer_class.from_pretrained('bert-base-uncased' )
a : Optional[int] = tokenizer.encode('sequence builders' , add_special_tokens=lowerCAmelCase_ )
a : int = tokenizer.encode('multi-sequence build' , add_special_tokens=lowerCAmelCase_ )
a : Optional[int] = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ )
a : Tuple = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ )
assert encoded_sentence == [1_01] + text + [1_02]
assert encoded_pair == [1_01] + text + [1_02] + text_a + [1_02]
def lowercase_ ( self : Dict ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a : str = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
a : Optional[int] = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
a : str = tokenizer_r.encode_plus(
lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , )
a : Optional[Any] = tokenizer_r.do_lower_case if hasattr(lowerCAmelCase_ , 'do_lower_case' ) else False
a : List[Any] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def lowercase_ ( self : Any ):
a : str = ['的', '人', '有']
a : Tuple = ''.join(lowerCAmelCase_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a : Any = True
a : List[Any] = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
a : Optional[int] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
a : str = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
a : Any = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
a : List[str] = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ )
a : Optional[int] = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
a : List[Any] = False
a : int = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
a : Union[str, Any] = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
a : Any = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
a : Optional[int] = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
a : Any = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ )
a : Any = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ )
# it is expected that only the first Chinese character is not preceded by "##".
a : Dict = [
F"""##{token}""" if idx != 0 else token for idx, token in enumerate(lowerCAmelCase_ )
]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
'''simple docstring'''
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class a__:
def __init__( self : Union[str, Any] , __snake_case : Optional[int] , __snake_case : Optional[int]=2 , __snake_case : Union[str, Any]=8 , __snake_case : List[str]=True , __snake_case : Dict=True , __snake_case : Optional[Any]=True , __snake_case : List[str]=True , __snake_case : Tuple=99 , __snake_case : int=16 , __snake_case : Optional[int]=5 , __snake_case : int=2 , __snake_case : Tuple=36 , __snake_case : Optional[Any]="gelu" , __snake_case : str=0.0 , __snake_case : Optional[int]=0.0 , __snake_case : Tuple=5_12 , __snake_case : str=16 , __snake_case : str=2 , __snake_case : int=0.02 , __snake_case : Optional[int]=3 , __snake_case : List[Any]=4 , __snake_case : Any=None , ):
a : int = parent
a : Any = batch_size
a : Optional[int] = seq_length
a : List[str] = is_training
a : Dict = use_input_mask
a : Union[str, Any] = use_token_type_ids
a : Tuple = use_labels
a : Dict = vocab_size
a : Optional[int] = hidden_size
a : List[Any] = num_hidden_layers
a : Optional[Any] = num_attention_heads
a : str = intermediate_size
a : Dict = hidden_act
a : str = hidden_dropout_prob
a : Tuple = attention_probs_dropout_prob
a : Optional[Any] = max_position_embeddings
a : Tuple = type_vocab_size
a : int = type_sequence_label_size
a : List[Any] = initializer_range
a : List[str] = num_labels
a : List[str] = num_choices
a : Optional[Any] = scope
def lowercase_ ( self : Union[str, Any] ):
a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a : Optional[Any] = None
if self.use_input_mask:
a : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
a : Tuple = None
if self.use_token_type_ids:
a : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a : str = None
a : int = None
a : Any = None
if self.use_labels:
a : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a : List[str] = ids_tensor([self.batch_size] , self.num_choices )
a : Dict = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase_ ( self : Union[str, Any] ):
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__snake_case , initializer_range=self.initializer_range , )
def lowercase_ ( self : List[str] ):
config = self.get_config()
config.vocab_size = 3_00
return config
def lowercase_ ( self : Union[str, Any] ):
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = self.prepare_config_and_inputs()
config.is_decoder = True
encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowercase_ ( self : int , __snake_case : int , __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : Any ):
a : Dict = MraModel(config=__snake_case )
model.to(__snake_case )
model.eval()
a : str = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case )
a : List[str] = model(__snake_case , token_type_ids=__snake_case )
a : Union[str, Any] = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self : List[str] , __snake_case : Tuple , __snake_case : List[str] , __snake_case : str , __snake_case : Optional[Any] , __snake_case : Union[str, Any] , __snake_case : Any , __snake_case : Optional[Any] , __snake_case : Optional[int] , __snake_case : List[Any] , ):
a : Optional[Any] = True
a : Optional[int] = MraModel(__snake_case )
model.to(__snake_case )
model.eval()
a : List[Any] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , encoder_hidden_states=__snake_case , encoder_attention_mask=__snake_case , )
a : Any = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , encoder_hidden_states=__snake_case , )
a : Optional[int] = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self : Optional[Any] , __snake_case : int , __snake_case : List[Any] , __snake_case : Optional[int] , __snake_case : Optional[Any] , __snake_case : List[Any] , __snake_case : Dict , __snake_case : Optional[Any] ):
a : Union[str, Any] = MraForMaskedLM(config=__snake_case )
model.to(__snake_case )
model.eval()
a : List[str] = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ ( self : Tuple , __snake_case : Union[str, Any] , __snake_case : str , __snake_case : Tuple , __snake_case : str , __snake_case : Tuple , __snake_case : Optional[int] , __snake_case : int ):
a : Optional[int] = MraForQuestionAnswering(config=__snake_case )
model.to(__snake_case )
model.eval()
a : Optional[int] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , start_positions=__snake_case , end_positions=__snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase_ ( self : Dict , __snake_case : Tuple , __snake_case : List[Any] , __snake_case : List[Any] , __snake_case : Optional[Any] , __snake_case : Any , __snake_case : List[str] , __snake_case : str ):
a : Tuple = self.num_labels
a : Dict = MraForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
a : Any = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self : str , __snake_case : Dict , __snake_case : Optional[Any] , __snake_case : int , __snake_case : List[Any] , __snake_case : Any , __snake_case : List[Any] , __snake_case : int ):
a : Tuple = self.num_labels
a : Tuple = MraForTokenClassification(config=__snake_case )
model.to(__snake_case )
model.eval()
a : List[Any] = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase_ ( self : Any , __snake_case : Any , __snake_case : str , __snake_case : Dict , __snake_case : List[Any] , __snake_case : Any , __snake_case : Any , __snake_case : str ):
a : Optional[int] = self.num_choices
a : int = MraForMultipleChoice(config=__snake_case )
model.to(__snake_case )
model.eval()
a : Tuple = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a : Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a : int = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase_ ( self : Optional[Any] ):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
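# Note (added sketch, not part of the original file): the integration tests below
# follow the usual golden-value pattern -- run a released checkpoint on a
# deterministic input and compare a small slice of the output against values
# recorded when the model was converted, e.g.:
#
#     output = model(torch.arange(256).unsqueeze(0))[0]
#     torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4)
#
# The loose tolerance absorbs small numeric differences across hardware/kernels.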
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4)) | 96 | 0
"""simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_like(self):
        # This tokenizer inherits from a BERT-style tokenizer, which is already well tested.
        pass
| 289 | """simple docstring"""
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    """Knuth-Morris-Pratt search: return True if `pattern` occurs in `text`."""
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """Calculates the new index we should go to if we fail a comparison."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
if __name__ == "__main__":
# Test 1)
UpperCAmelCase__ = """abc1abc12"""
UpperCAmelCase__ = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
UpperCAmelCase__ = """alskfjaldsk23adsfabcabc"""
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
UpperCAmelCase__ = """ABABX"""
UpperCAmelCase__ = """ABABZABABYABABX"""
assert kmp(pattern, text)
# Test 3)
UpperCAmelCase__ = """AAAB"""
UpperCAmelCase__ = """ABAAAAAB"""
assert kmp(pattern, text)
# Test 4)
UpperCAmelCase__ = """abcdabcy"""
UpperCAmelCase__ = """abcxabcdabxabcdabcdabcy"""
assert kmp(pattern, text)
# Test 5)
UpperCAmelCase__ = """aabaabaaa"""
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
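    # Worked example (added sketch): failure[8] == 2 for "aabaabaaa" because "aa"
    # is the longest proper prefix that is also a suffix, so a mismatch at pattern
    # index 8 resumes matching at index 2 instead of restarting at 0. This is what
    # gives KMP its O(len(text) + len(pattern)) running time.
    assert kmp("aa", "xaay")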
| 289 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
        "GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXForCausalLM",
        "GPTNeoXForQuestionAnswering",
        "GPTNeoXForSequenceClassification",
        "GPTNeoXForTokenClassification",
        "GPTNeoXLayer",
        "GPTNeoXModel",
        "GPTNeoXPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
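# Behaviour sketch (added, not part of the original module): once the lazy module
# replaces this module in sys.modules, `from ...gpt_neox import GPTNeoXModel`
# defers importing the torch-heavy submodule until the attribute is first
# accessed; `_import_structure` only maps submodule names to exported symbols.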
| 356 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self) -> pa.StructType:
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the Translation feature into a dictionary."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}
@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self) -> pa.StructType:
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).'
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the TranslationVariableLanguages feature into a dictionary."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
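# Usage sketch (added; the expected output follows directly from
# `encode_example`, which sorts the (language, translation) pairs):
#
#   feature = TranslationVariableLanguages(languages=["en", "fr", "de"])
#   feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"], "de": "die katze"})
#   # -> {"language": ("de", "en", "fr", "fr"),
#   #     "translation": ("die katze", "the cat", "la chatte", "le chat")}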
| 16 | 0 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
_SCREAMING_SNAKE_CASE = "\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n"
_SCREAMING_SNAKE_CASE = "\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n"
_SCREAMING_SNAKE_CASE = "\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the SQuAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]\n >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]\n >>> squad_metric = datasets.load_metric(\"squad\")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Squad(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {"id": datasets.Value("string" ), "prediction_text": datasets.Value("string" )},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 158 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    """Collect rows partition by partition, in the given order, recording their expected ids."""
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(F'''SPARK_PARTITION_ID() = {part_id}''').collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((F'''{part_id}_{row_idx}''', row.asDict()))
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)
    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
assert row_id == F'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])
        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
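# Added note: row ids follow the f"{partition_id}_{row_index}" convention used by
# SparkExamplesIterable, so sharding four partitions across two workers gives
# worker 0 the ids "0_*" and "2_*" and worker 1 the ids "1_*" and "3_*", exactly
# as checked above.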
| 158 | 1 |
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + '/test_data/fsmt/fsmt_val_data.json'
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test only evaluates a small batch, but it is enough to
        # detect a regression in the output quality
        mname = f"""facebook/wmt19-{pair}"""
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]['''src''']
        tgt_sentences = bleu_data[pair]['''tgt''']

        batch = tokenizer(src_sentences, return_tensors='pt', truncation=True, padding='longest').to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids, num_beams=8, )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores['bleu'], min_bleu_score)
| 356 |
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set:
    '''Iterative depth-first search on `graph`, returning the set of explored vertices.'''
    explored, stack = set(), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
G = {
"A": ["B", "C", "D"],
"B": ["A", "D", "E"],
"C": ["A", "F"],
"D": ["B", "D"],
"E": ["B", "F"],
"F": ["C", "E", "G"],
"G": ["F"],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, "A"))
| 208 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained('''microsoft/resnet-50''')
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained('''microsoft/resnet-101''')
    else:
        raise ValueError('''Model name should include either resnet50 or resnet101''')

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = '''panoptic''' in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
    repo_id = '''huggingface/label-files'''
    filename = '''coco-detection-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset'''), '''r'''))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic
def create_rename_keys(config):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.conv1.weight''', '''backbone.conv_encoder.model.embedder.embedder.convolution.weight''') )
rename_keys.append(('''backbone.0.body.bn1.weight''', '''backbone.conv_encoder.model.embedder.embedder.normalization.weight''') )
rename_keys.append(('''backbone.0.body.bn1.bias''', '''backbone.conv_encoder.model.embedder.embedder.normalization.bias''') )
rename_keys.append(('''backbone.0.body.bn1.running_mean''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_mean''') )
rename_keys.append(('''backbone.0.body.bn1.running_var''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_var''') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
F'''encoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
F'''decoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
) )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
) )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
] )
return rename_keys
def rename_key(state_dict, old, new):
    """Rename a key of the state dict in place."""
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ''''''
    if is_panoptic:
        prefix = '''detr.'''

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''')
        in_proj_bias = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''encoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:256, :]
        state_dict[F'''encoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:256]
        state_dict[F'''encoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[256:512, :]
        state_dict[F'''encoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[256:512]
        state_dict[F'''encoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-256:, :]
        state_dict[F'''encoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''')
        in_proj_bias = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''decoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:256, :]
        state_dict[F'''decoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:256]
        state_dict[F'''decoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[256:512, :]
        state_dict[F'''decoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[256:512]
        state_dict[F'''decoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-256:, :]
        state_dict[F'''decoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''')
        in_proj_bias_cross_attn = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''')
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[F'''decoder.layers.{i}.encoder_attn.q_proj.weight'''] = in_proj_weight_cross_attn[:256, :]
        state_dict[F'''decoder.layers.{i}.encoder_attn.q_proj.bias'''] = in_proj_bias_cross_attn[:256]
        state_dict[F'''decoder.layers.{i}.encoder_attn.k_proj.weight'''] = in_proj_weight_cross_attn[256:512, :]
        state_dict[F'''decoder.layers.{i}.encoder_attn.k_proj.bias'''] = in_proj_bias_cross_attn[256:512]
        state_dict[F'''decoder.layers.{i}.encoder_attn.v_proj.weight'''] = in_proj_weight_cross_attn[-256:, :]
        state_dict[F'''decoder.layers.{i}.encoder_attn.v_proj.bias'''] = in_proj_bias_cross_attn[-256:]
def prepare_img():
    """Download an image of two cats on which the conversion is verified."""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original DETR weights into our DETR structure."""
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        '''detr-resnet-50''': '''detr_resnet50''',
        '''detr-resnet-101''': '''detr_resnet101''',
    }
    logger.info(F'''Converting model {model_name}...''')
    detr = torch.hub.load('''facebookresearch/detr''', model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = '''detr.''' + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = '''detr.model.''' if is_panoptic else '''model.'''
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith('''detr''')
                and not key.startswith('''class_labels_classifier''')
                and not key.startswith('''bbox_predictor''')
            ):
                val = state_dict.pop(key)
                state_dict['''detr.model''' + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict['''detr.''' + key] = val
            elif key.startswith('''bbox_attention''') or key.startswith('''mask_head'''):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith('''class_labels_classifier''') and not key.startswith('''bbox_predictor'''):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion on an image
    format = '''coco_panoptic''' if is_panoptic else '''coco_detection'''
    processor = DetrImageProcessor(format=format)
    encoding = processor(images=prepare_img(), return_tensors='''pt''')
    pixel_values = encoding['''pixel_values''']

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)

    assert torch.allclose(outputs.logits, original_outputs['''pred_logits'''], atol=1E-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs['''pred_boxes'''], atol=1E-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs['''pred_masks'''], atol=1E-4)
    print('''Looks ok!''')

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info('''Uploading PyTorch model and image processor to the hub...''')
        model.push_to_hub(F'''nielsr/{model_name}''')
        processor.push_to_hub(F'''nielsr/{model_name}''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""detr-resnet-50""",
type=str,
choices=["""detr-resnet-50""", """detr-resnet-101"""],
help="""Name of the DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the model to the hub or not.""")
    args = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
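    # Example invocation (added sketch; assumes this script is saved as
    # convert_detr_to_pytorch.py):
    #   python convert_detr_to_pytorch.py --model_name detr-resnet-50 \
    #       --pytorch_dump_folder_path ./detr-resnet-50 --push_to_hub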
| 303 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""",
"""studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""",
}
class LukeConfig(PretrainedConfig):
    """Configuration class to store the configuration of a LUKE model."""

    model_type = '''luke'''

    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        """Constructs LukeConfig."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
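# Usage sketch (added): the defaults reproduce a luke-base style configuration;
# `entity_vocab_size` and `entity_emb_size` are the LUKE-specific knobs on top of
# the usual BERT-style hyperparameters.
#
#   from transformers import LukeConfig, LukeModel
#   config = LukeConfig()          # luke-base style defaults
#   model = LukeModel(config)      # randomly initialized weights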
| 303 | 1 |
def solution(length: int = 50) -> int:
    """Count the ways a row of the given length can be filled with blocks of
    minimum length three, separated by at least one empty square (including
    the empty filling)."""
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
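    # Added check: Project Euler 114 states there are exactly seventeen ways to
    # fill a row measuring seven units, which this recurrence reproduces.
    assert solution(7) == 17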
| 364 |
def a ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
UpperCamelCase : Any = set()
# Replace all the whitespace in our sentence
UpperCamelCase : Union[str, Any] = input_str.replace(''' ''' , '''''' )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(SCREAMING_SNAKE_CASE_ ) == 2_6
def a ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
UpperCamelCase : str = [False] * 2_6
for char in input_str:
if char.islower():
UpperCamelCase : List[Any] = True
elif char.isupper():
UpperCamelCase : List[Any] = True
return all(SCREAMING_SNAKE_CASE_ )
def a ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
return len({char for char in input_str.lower() if char.isalpha()} ) == 2_6
def a ( ):
"""simple docstring"""
from timeit import timeit
UpperCamelCase : int = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'''
print(timeit('''is_pangram()''' , setup=SCREAMING_SNAKE_CASE_ ) )
print(timeit('''is_pangram_faster()''' , setup=SCREAMING_SNAKE_CASE_ ) )
print(timeit('''is_pangram_fastest()''' , setup=SCREAMING_SNAKE_CASE_ ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
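    # Added sanity checks: the default sentence is a pangram, "hello world" is not.
    assert is_pangram() and is_pangram_faster() and is_pangram_fastest()
    assert not is_pangram("hello world")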
| 315 | 0 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(R'''\b(a|an|the)\b''', re.UNICODE)
OPTS = None
def parse_args():
    parser = argparse.ArgumentParser('''Official evaluation script for SQuAD version 2.0.''')
    parser.add_argument('''data_file''', metavar='''data.json''', help='''Input data JSON file.''')
    parser.add_argument('''pred_file''', metavar='''pred.json''', help='''Model predictions.''')
    parser.add_argument(
        '''--out-file''', '''-o''', metavar='''eval.json''', help='''Write accuracy metrics to file (default is stdout).''')
    parser.add_argument(
        '''--na-prob-file''', '''-n''', metavar='''na_prob.json''', help='''Model estimates of probability of no answer.''')
    parser.add_argument(
        '''--na-prob-thresh''', '''-t''', type=float, default=1.0, help='''Predict "" if no-answer probability exceeds this (default = 1.0).''', )
    parser.add_argument(
        '''--out-image-dir''', '''-p''', metavar='''out_images''', default=None, help='''Save precision-recall curves to directory.''')
    parser.add_argument('''--verbose''', '''-v''', action='''store_true''')
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa['''answers''']['''text'''])
    return qid_to_has_ans


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(''' ''', text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa['''id''']
                gold_answers = [t for t in qa['''answers''']['''text'''] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = ['''''']
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ('''exact''', 1_00.0 * sum(exact_scores.values()) / total),
                ('''f1''', 1_00.0 * sum(f1_scores.values()) / total),
                ('''total''', total),
            ])
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ('''exact''', 1_00.0 * sum(exact_scores[k] for k in qid_list) / total),
                ('''f1''', 1_00.0 * sum(f1_scores[k] for k in qid_list) / total),
                ('''total''', total),
            ])


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color='''b''', alpha=0.2, where='''post''')
    plt.fill_between(recalls, precisions, step='''post''', alpha=0.2, color='''b''')
    plt.xlabel('''Recall''')
    plt.ylabel('''Precision''')
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 1_00.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, '''pr_exact.png'''), title='''Precision-Recall curve for Exact Match score''', )
    pr_f1 = make_precision_recall_eval(
        f1_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, '''pr_f1.png'''), title='''Precision-Recall curve for F1 score''', )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, '''pr_oracle.png'''), title='''Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)''', )
    merge_eval(main_eval, pr_exact, '''pr_exact''')
    merge_eval(main_eval, pr_f1, '''pr_f1''')
    merge_eval(main_eval, pr_oracle, '''pr_oracle''')


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=2_0, range=(0.0, 1.0))
    plt.xlabel('''Model probability of no-answer''')
    plt.ylabel('''Proportion of dataset''')
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 1_00.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval['''best_exact'''] = best_exact
    main_eval['''best_exact_thresh'''] = exact_thresh
    main_eval['''best_f1'''] = best_f1
    main_eval['''best_f1_thresh'''] = f1_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json['''data''']
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, '''HasAns''')
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, '''NoAns''')
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, '''hasAns''')
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, '''noAns''')
    if OPTS.out_file:
        with open(OPTS.out_file, '''w''') as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('''Agg''')
import matplotlib.pyplot as plt
main()
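    # Example invocation (added sketch; file name assumed):
    #   python evaluate_squad_v2.py dev-v2.0.json predictions.json \
    #       --na-prob-file na_probs.json --na-prob-thresh 0.5 --out-file eval.json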
| 338 | import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    """Wraps an OwlViT image processor and a CLIP tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''', FutureWarning, )
            feature_extractor = kwargs.pop('''feature_extractor''')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''')
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''')

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                '''You have to specify at least one text or query image or image. All three cannot be none.''')

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [''' '''] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError('''Input text should be a string, a list of strings or a nested list of strings''')

            if return_tensors == "np":
                input_ids = np.concatenate([encoding['''input_ids'''] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding['''attention_mask'''] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding['''input_ids'''] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding['''attention_mask'''] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding['''input_ids'''] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding['''attention_mask'''] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding['''input_ids'''] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding['''attention_mask'''] for encoding in encodings], axis=0)

            else:
                raise ValueError('''Target return tensor type could not be returned''')

            encoding = BatchEncoding()
            encoding['''input_ids'''] = input_ids
            encoding['''attention_mask'''] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs).pixel_values
            encoding['''query_pixel_values'''] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->Optional[int]:
return self.image_processor.post_process(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->Any:
return self.image_processor.post_process_object_detection(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->Tuple:
return self.image_processor.post_process_image_guided_detection(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->str:
return self.tokenizer.batch_decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->List[Any]:
return self.tokenizer.decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __SCREAMING_SNAKE_CASE , )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __SCREAMING_SNAKE_CASE , )
return self.image_processor
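# Minimal usage sketch (not part of the original file; the checkpoint name and
# the `image` variable are assumptions shown for illustration only):
#   from transformers import OwlViTProcessor
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")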
| 338 | 1 |
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    '''simple docstring'''
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2", revision="bf16", dtype=jnp.bfloat16, )
        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)
        params = replicate(params)
        prompt_ids = shard(prompt_ids)
        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f'''output_slice: {output_slice}''')
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, revision="bf16", dtype=jnp.bfloat16, )
        params["scheduler"] = scheduler_params
        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)
        params = replicate(params)
        prompt_ids = shard(prompt_ids)
        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f'''output_slice: {output_slice}''')
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
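# Note on the data layout above (descriptive only): with N = jax.device_count(),
# replicate(params) copies the parameter pytree to every device, while
# shard(prompt_ids) reshapes the leading axis from (N * B, ...) to (N, B, ...)
# so the pmapped pipeline call consumes one shard per device.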
| 360 |
def text_justification(word: str, max_width: int) -> list:
    words = word.split()
    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)
    answer = []
    line: list[str] = []
    width = 0
    for inner_word in words:
        if width + len(inner_word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(inner_word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(inner_word)
            width += len(inner_word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [inner_word], len(inner_word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
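# Worked example (output verified by tracing the algorithm above):
#   text_justification("This is an example of text justification.", 16)
#   -> ['This    is    an', 'example  of text', 'justification.  ']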
if __name__ == "__main__":
from doctest import testmod
testmod()
| 223 | 0 |
"""simple docstring"""
TEXT_TO_IMAGE_PARAMS = frozenset(
[
'''prompt''',
'''height''',
'''width''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
'''cross_attention_kwargs''',
]
)
TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(['''prompt''', '''negative_prompt'''])
TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])
IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(['''image'''])
IMAGE_VARIATION_PARAMS = frozenset(
[
'''image''',
'''height''',
'''width''',
'''guidance_scale''',
]
)
IMAGE_VARIATION_BATCH_PARAMS = frozenset(['''image'''])
TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
[
'''prompt''',
'''image''',
'''height''',
'''width''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
]
)
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(['''prompt''', '''image''', '''negative_prompt'''])
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
[
# Text guided image variation with an image mask
'''prompt''',
'''image''',
'''mask_image''',
'''height''',
'''width''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
]
)
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(['''prompt''', '''image''', '''mask_image''', '''negative_prompt'''])
IMAGE_INPAINTING_PARAMS = frozenset(
[
# image variation with an image mask
'''image''',
'''mask_image''',
'''height''',
'''width''',
'''guidance_scale''',
]
)
IMAGE_INPAINTING_BATCH_PARAMS = frozenset(['''image''', '''mask_image'''])
IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
[
'''example_image''',
'''image''',
'''mask_image''',
'''height''',
'''width''',
'''guidance_scale''',
]
)
IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(['''example_image''', '''image''', '''mask_image'''])
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(['''class_labels'''])
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(['''class_labels'''])
UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(['''batch_size'''])
UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([])
UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(['''batch_size'''])
UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([])
TEXT_TO_AUDIO_PARAMS = frozenset(
[
'''prompt''',
'''audio_length_in_s''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
'''cross_attention_kwargs''',
]
)
TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(['''prompt''', '''negative_prompt'''])
TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(['''input_tokens'''])
TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(['''input_tokens'''])
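# Consumption sketch (hypothetical test class, shown for illustration only):
#   class StableDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
#       params = TEXT_TO_IMAGE_PARAMS
#       batch_params = TEXT_TO_IMAGE_BATCH_PARAMS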
| 105 |
"""simple docstring"""
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'''
    },
    '''merges_file''': {
        '''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'''
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''allegro/herbert-base-cased''': 514}
PRETRAINED_INIT_CONFIGURATION = {}
class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sep_token="</s>", **kwargs, ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, sep_token=sep_token, **kwargs, )
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
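# Minimal usage sketch (checkpoint name taken from the pretrained map above):
#   tokenizer = HerbertTokenizerFast.from_pretrained("allegro/herbert-base-cased")
#   tokenizer.build_inputs_with_special_tokens([10, 11], [12])
#   -> [cls_id, 10, 11, sep_id, 12, sep_id]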
| 105 | 1 |
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    '''simple docstring'''
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
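# e.g. floats_list((2, 3)) -> a 2x3 nested list of floats drawn from [0.0, 1.0);
# pass scale= to widen the range, or rng= for a deterministic generator.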
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTester(unittest.TestCase):
    '''simple docstring'''
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=20_00, feature_size=24, num_mel_bins=24, padding_value=0.0, sampling_rate=1_60_00, return_attention_mask=True, do_normalize=True, ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "num_mel_bins": self.num_mel_bins,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechaTextFeatureExtractor if is_speech_available() else None
    def setUp(self):
        self.feat_extract_tester = SpeechaTextFeatureExtractionTester(self)
    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1E-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1E-3))
    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(8_00, 14_00, 2_00)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors='np').input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)
        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors='np').input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors='np').input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1E-3))
        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors='np').input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors='np').input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1E-3))
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (8_00, 8_00, 8_00)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors='np').input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors='np').input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1E-3))
    def test_cepstral_mean_and_variance_normalization(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(8_00, 14_00, 2_00)]
        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True)
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]
            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(8_00, 14_00, 2_00)]
        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors='np', return_attention_mask=True)
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]
            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1E-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1E-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(8_00, 14_00, 2_00)]
        inputs = feature_extractor(
            speech_inputs, padding='max_length', max_length=4, truncation=True, return_tensors='np', return_attention_mask=True, )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)
        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])
    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(8_00, 14_00, 2_00)]
        inputs = feature_extractor(
            speech_inputs, padding='longest', max_length=4, truncation=True, return_tensors='np', return_attention_mask=True, )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)
        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])
        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))
        speech_inputs = [floats_list((1, x))[0] for x in range(8_00, 14_00, 2_00)]
        inputs = feature_extractor(
            speech_inputs, padding='longest', max_length=16, truncation=True, return_tensors='np', return_attention_mask=True, )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)
        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])
        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 6, 24))
    def test_double_precision_pad(self):
        import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(1_00, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{'input_features': inputs}], return_tensors='np')
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{'input_features': inputs}], return_tensors='pt')
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset
        ds = load_dataset('hf-internal-testing/librispeech_asr_dummy', 'clean', split='validation')
        # automatic decoding with librispeech
        speech_samples = ds.sort('id').select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        expected = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors='pt').input_features
        self.assertEquals(input_features.shape, (1, 5_84, 24))
        self.assertTrue(np.allclose(input_features[0, 0, :30], expected, atol=1E-4))
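# The zero-mean / unit-variance checks above follow from the extractor's
# utterance-level cepstral mean and variance normalization (do_normalize=True),
# which is computed over the unpadded frames only.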
| 351 | """simple docstring"""
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"
if is_tf_available():
    class TFNewModel(TFBertModel):
        config_class = NewModelConfig
@require_tf
class TFAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        model_name = 'bert-base-cased'
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)
        model = TFAutoModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertModel)
    @slow
    def test_model_for_pretraining_from_pretrained(self):
        model_name = 'bert-base-cased'
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)
        model = TFAutoModelForPreTraining.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertForPreTraining)
    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPTaConfig)
            model = TFAutoModelForCausalLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPTaLMHeadModel)
    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = TFAutoModelWithLMHead.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)
    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = TFAutoModelForMaskedLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)
    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TaConfig)
            model = TFAutoModelForSeqaSeqLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForSeqaSeqLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTaForConditionalGeneration)
    @slow
    def test_sequence_classification_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)
    @slow
    def test_question_answering_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)
    @slow
    @require_tensorflow_probability
    def test_table_question_answering(self):
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TapasConfig)
            model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_name)
            model, loading_info = TFAutoModelForTableQuestionAnswering.from_pretrained(
                model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTapasForQuestionAnswering)
    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 1_44_10)
        self.assertEqual(model.num_parameters(only_trainable=True), 1_44_10)
    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 1_44_10)
        self.assertEqual(model.num_parameters(only_trainable=True), 1_44_10)
    def test_from_pretrained_with_tuple_values(self):
        # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
        model = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny')
        self.assertIsInstance(model, TFFunnelModel)
        config = copy.deepcopy(model.config)
        config.architectures = ['FunnelBaseModel']
        model = TFAutoModel.from_config(config)
        self.assertIsInstance(model, TFFunnelBaseModel)
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir)
            model = TFAutoModel.from_pretrained(tmp_dir)
            self.assertIsInstance(model, TFFunnelBaseModel)
    def test_new_model_registration(self):
        try:
            AutoConfig.register('new-model', NewModelConfig)
            auto_classes = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]
            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__):
                    # Wrong config class will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFNewModel)
                    auto_class.register(NewModelConfig, TFNewModel)
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFBertModel)
                    # Now that the config is registered, it can be used as any other config with the auto-API
                    tiny_config = BertModelTester(self).get_config()
                    config = NewModelConfig(**tiny_config.to_dict())
                    model = auto_class.from_config(config)
                    self.assertIsInstance(model, TFNewModel)
                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(tmp_dir)
                        new_model = auto_class.from_pretrained(tmp_dir)
                        self.assertIsInstance(new_model, TFNewModel)
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, 'bert-base is not a local folder and is not a valid model identifier'):
            model = TFAutoModel.from_pretrained('bert-base')
    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'):
            model = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision='aaaaaa')
    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, 'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin', ):
            model = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model')
    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, 'Use `from_pt=True` to load this model'):
            model = TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only')
    def test_cached_model_has_minimum_calls_to_head(self):
        # Make sure we have cached the model.
        _ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert')
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert')
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)
        # With a sharded checkpoint
        _ = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded')
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded')
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)
| 95 | 0 |
def nor_gate(input_1: int, input_2: int) -> int:
    """simple docstring"""
    return int(input_1 == input_2 == 0)
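# Quick check against the definition (output is 1 only when both inputs are 0):
#   nor_gate(0, 0) == 1; nor_gate(0, 1) == 0; nor_gate(1, 1) == 0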
def main() -> None:
"""simple docstring"""
print("Truth Table of NOR Gate:" )
print("| Input 1 | Input 2 | Output |" )
print(F"""| 0 | 0 | {nor_gate(0 , 0 )} |""" )
print(F"""| 0 | 1 | {nor_gate(0 , 1 )} |""" )
print(F"""| 1 | 0 | {nor_gate(1 , 0 )} |""" )
print(F"""| 1 | 1 | {nor_gate(1 , 1 )} |""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 130 |
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name):
    """simple docstring"""
    config = SwinConfig(image_size=192)
    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants")
    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    return config
def rename_key(name):
    """simple docstring"""
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token", "embeddings.mask_token")
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"
    if "decoder" in name:
        pass
    else:
        name = "swin." + name
    return name
def convert_state_dict(orig_state_dict, model):
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    """simple docstring"""
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = ViTImageProcessor(size={"height": 192, "width": 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    print(outputs.keys())
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
        print(F"""Saving image processor to {pytorch_dump_folder_path}""")
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(F"""Pushing model and image processor for {model_name} to hub""")
        model.push_to_hub(F"""microsoft/{model_name}""")
        image_processor.push_to_hub(F"""microsoft/{model_name}""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''swin-base-simmim-window6-192''',
type=str,
choices=['''swin-base-simmim-window6-192''', '''swin-large-simmim-window12-192'''],
help='''Name of the Swin SimMIM model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth''',
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
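# Typical invocation (sketch; the script filename is hypothetical):
#   python convert_swin_simmim_to_pytorch.py \
#       --model_name swin-base-simmim-window6-192 \
#       --checkpoint_path simmim_pretrain__swin_base__img192_window6__100ep.pth \
#       --pytorch_dump_folder_path ./swin-base-simmim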
| 130 | 1 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    '''simple docstring'''
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    '''simple docstring'''
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy").images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy").images
        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_versatile_diffusion_text_to_image(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy").images
        image_slice = image[0, 2_53:2_56, 2_53:2_56, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 370 |
"""simple docstring"""
def bfs(graph, s, t, parent):
    '''simple docstring'''
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def ford_fulkerson(graph, source, sink):
    '''simple docstring'''
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
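# The expected output for this classic CLRS example network is 23.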
| 259 | 0 |
"""simple docstring"""
def bfs(graph, s, t, parent):
    '''simple docstring'''
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def ford_fulkerson(graph, source, sink):
    '''simple docstring'''
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float('''Inf''')
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
| 72 |
"""simple docstring"""
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    """simple docstring"""
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = F"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum
def resistor_series(resistors: list[float]) -> float:
    """simple docstring"""
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = F"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
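# Worked examples:
#   resistor_parallel([3.2, 3.2])  ->  1.6   (1 / (1/3.2 + 1/3.2))
#   resistor_series([3.2, 3.2])    ->  6.4   (3.2 + 3.2)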
if __name__ == "__main__":
import doctest
doctest.testmod()
| 64 | 0 |
'''simple docstring'''
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    '''simple docstring'''
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError('''One and only one argument must be 0''')
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError('''Exactly one argument must be 0''')
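# Worked example (3-4-5 right triangle): electrical_impedance(3, 4, 0)
# -> {'impedance': 5.0}, since |Z| = sqrt(R^2 + X^2) = sqrt(9 + 16).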
if __name__ == "__main__":
import doctest
doctest.testmod() | 240 |
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    """simple docstring"""
    def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, embedding_size=16, hidden_size=36, num_hidden_layers=6, num_hidden_groups=6, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, num_hidden_groups=self.num_hidden_groups, )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, sentence_order_label=sequence_labels, )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class a ( _a , _a , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE : Any = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                # AlbertForPreTraining needs masked-LM labels plus a sentence-order label
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)
    def test_config(self):
self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4)) | 240 | 1 |
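# Illustrative sketch (not part of the test suite above): the create_and_check_*
# helpers boil down to building a tiny config, running a head model, and checking
# output shapes. The tiny hyperparameters below are arbitrary choices for the sketch.
import torch
from transformers import AlbertConfig, AlbertForSequenceClassification

tiny_config = AlbertConfig(
    vocab_size=99,
    embedding_size=16,
    hidden_size=36,
    num_hidden_layers=2,
    num_attention_heads=6,
    intermediate_size=37,
    num_labels=3,
)
tiny_model = AlbertForSequenceClassification(tiny_config)
tiny_model.eval()
batch = torch.randint(0, tiny_config.vocab_size, (2, 7))  # batch_size=2, seq_length=7
with torch.no_grad():
    logits = tiny_model(batch).logits
assert logits.shape == (2, 3)  # (batch_size, num_labels), mirroring the assertion above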
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset() -> Dataset:
    """Build a tiny dataset with two near-duplicate files and one distinct one."""
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset
class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
| 22 |
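# Hedged sketch of the MinHash idea behind make_duplicate_clusters/deduplicate_dataset
# (their real implementations live in minhash_deduplication.py, which is not shown here).
# This uses the third-party `datasketch` library; the 0.85 threshold mirrors the test above.
from datasketch import MinHash, MinHashLSH

def minhash_of(text, num_perm=128):
    m = MinHash(num_perm=num_perm)
    for token in text.split():
        m.update(token.encode("utf-8"))
    return m

docs = {"test_1.py": "a " * 20, "test_2.py": "a " * 30, "unit_test.py": "b " * 7}
lsh = MinHashLSH(threshold=0.85, num_perm=128)
for name, text in docs.items():
    lsh.insert(name, minhash_of(text))
# test_1.py and test_2.py share the same token set, so each retrieves the other;
# unit_test.py matches only itself.
print(lsh.query(minhash_of(docs["test_1.py"])))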
from jiwer import compute_measures
import datasets
_CITATION = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
_DESCRIPTION = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
_KWARGS_DESCRIPTION = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> wer = datasets.load_metric("wer")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WER(datasets.Metric):
"""simple docstring"""
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
| 124 | 0 |
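# Worked check of the docstring formula WER = (S + D + I) / (S + D + C), using the
# same sentence pairs as the usage example above and the same jiwer API the metric uses.
from jiwer import compute_measures

pair_measures = [
    compute_measures("this is the reference", "this is the prediction"),   # 1 sub, 3 hits
    compute_measures("there is another one", "there is an other sample"),  # 2 subs, 1 ins, 2 hits
]
errors = sum(m["substitutions"] + m["deletions"] + m["insertions"] for m in pair_measures)
ref_words = sum(m["substitutions"] + m["deletions"] + m["hits"] for m in pair_measures)
print(errors / ref_words)  # 4 / 8 = 0.5, matching the docstring example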
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid") | 361 |
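# Hedged sketch of how the helpers above are typically wired into a Trainer.
# "outputs" and the LightningModule are placeholders: the module is assumed to log
# a `val_avg_rouge2` metric and to expose hparams.output_dir, metrics and
# metrics_save_path, as the callbacks above expect.
import pytorch_lightning as pl

checkpoint_cb = get_checkpoint_callback("outputs", metric="rouge2")
early_stop_cb = get_early_stopping_callback(metric="rouge2", patience=3)
trainer = pl.Trainer(
    max_epochs=3,
    callbacks=[Seq2SeqLoggingCallback(), checkpoint_cb, early_stop_cb],
)
# trainer.fit(my_seq2seq_module)  # my_seq2seq_module: a hypothetical pl.LightningModule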
'''simple docstring'''
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    """Count, for each perimeter up to max_perimeter, the integer right triangles with that perimeter."""
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(max_perimeter: int = 1000) -> int:
    """Return the perimeter p <= max_perimeter with the largest number of right-triangle solutions."""
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]
if __name__ == "__main__":
print(f'''Perimeter {solution()} has maximum solutions''') | 270 | 0 |
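# Quick sanity checks for the functions above. For max_perimeter=12 the only
# integer right triangle is (3, 4, 5); for 120, three triangles share the maximal
# perimeter: (20, 48, 52), (24, 45, 51) and (30, 40, 50).
from collections import Counter

assert pythagorean_triple(12) == Counter({12: 1})
assert solution(120) == 120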
def solution() -> int:
    """Compute d1 * d10 * d100 * ... * d1000000 for the Champernowne constant 0.123456789101112..."""
    constant = []
    i = 1
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1
    constant = "".join(constant)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )
if __name__ == "__main__":
print(solution())
| 50 |
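# Worked illustration of the indexing used by solution(): the Champernowne constant
# is 0.123456789101112..., and constant[n - 1] is its n-th digit d_n.
digits = "".join(str(i) for i in range(1, 200))  # plenty of digits for small checks
assert digits[0] == "1"   # d_1 = 1
assert digits[9] == "1"   # d_10 is the leading '1' of "10"
assert digits[11] == "1"  # d_12 = 1, the example given in Project Euler problem 40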
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/levit-128S""": """https://huggingface.co/facebook/levit-128S/resolve/main/config.json""",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 242 | 0 |
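# Minimal usage sketch of the classes above: the defaults mirror the LeViT-128S
# checkpoint referenced in the archive map, and down_ops is derived automatically.
config = LevitConfig()
assert config.hidden_sizes == [128, 256, 384]
assert config.down_ops[0][0] == "Subsample"
onnx_config = LevitOnnxConfig(config)
print(dict(onnx_config.inputs))  # {'pixel_values': {0: 'batch', 1: 'num_channels', ...}}
print(onnx_config.atol_for_validation)  # 1e-4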
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 233 |
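# Sketch of what the _LazyModule pattern above buys: importing the package is cheap,
# and a submodule such as .config is only executed on first attribute access.
# Assumes a transformers version that still ships the `transformers.onnx` package.
import importlib

onnx_pkg = importlib.import_module("transformers.onnx")
print(type(onnx_pkg).__name__)       # the lazy module proxy, not a plain module
print(onnx_pkg.OnnxConfig.__name__)  # attribute access triggers `from .config import OnnxConfig`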
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            num_train_timesteps=1000,
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))
        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1

    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))
        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
| 233 | 1 |
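# Hedged sketch of the end-to-end call the slow tests above exercise; running it
# needs a GPU plus network access, and `init_image` is a placeholder for a
# 512x512 PIL image of the source scene.
import torch
from diffusers import CycleDiffusionPipeline, DDIMScheduler

model_id = "CompVis/stable-diffusion-v1-4"
scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler).to("cuda")
# result = pipe(
#     prompt="A blue colored car",
#     source_prompt="A black colored car",
#     image=init_image,  # hypothetical 512x512 PIL.Image of the black car
#     num_inference_steps=100,
#     eta=0.1,
#     strength=0.85,
#     guidance_scale=3,
#     source_guidance_scale=1,
# ).images[0]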