code
stringlengths 82
53.2k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
---|---|---|---|---|
"""simple docstring"""
import torch
def a_ ( ):
if torch.cuda.is_available():
A__ = torch.cuda.device_count()
else:
A__ = 0
print(f'''Successfully ran on {num_gpus} GPUs''' )
if __name__ == "__main__":
main()
| 571 |
"""simple docstring"""
def a_ ( __a , __a ):
return int(input_a == input_a == 0 )
def a_ ( ):
print('''Truth Table of NOR Gate:''' )
print('''| Input 1 | Input 2 | Output |''' )
print(f'''| 0 | 0 | {nor_gate(0 , 0 )} |''' )
print(f'''| 0 | 1 | {nor_gate(0 , 1 )} |''' )
print(f'''| 1 | 0 | {nor_gate(1 , 0 )} |''' )
print(f'''| 1 | 1 | {nor_gate(1 , 1 )} |''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 571 | 1 |
import inspect
import tempfile
import unittest

from huggingface_hub import hf_hub_download

from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin

# Numeric tolerance used by the slow integration tests below
# (obfuscated name; originally TOLERANCE).
_UpperCamelCase = 1e-4

if is_torch_available():
    import torch

    from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
    from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class __lowercase :
    # NOTE(review): this file is machine-obfuscated. Every assignment target
    # was rewritten to ``__lowerCAmelCase`` while later lines still read the
    # original descriptive names (e.g. ``self.d_model``), and the ``__init__``
    # signature repeats the parameter name ``A_`` (a SyntaxError). The class
    # is therefore not runnable as written; tokens are preserved byte-for-byte
    # and only comments/docstrings were added.

    def __init__( self , A_ , A_=16 , A_=13 , A_=7 , A_=14 , A_=10 , A_=19 , A_=5 , A_=4 , A_=True , A_=16 , A_=2 , A_=4 , A_=4 , A_="gelu" , A_=0.1 , A_=0.1 , A_=[1, 2, 3, 4, 5] , A_=25 , A_=5 , ) ->Tuple:
        """Store the hyper-parameters used to build a tiny AutoformerConfig."""
        __lowerCAmelCase : Optional[Any] = d_model
        __lowerCAmelCase : Any = parent
        __lowerCAmelCase : List[Any] = batch_size
        __lowerCAmelCase : int = prediction_length
        __lowerCAmelCase : str = context_length
        __lowerCAmelCase : Any = cardinality
        __lowerCAmelCase : Tuple = num_time_features
        __lowerCAmelCase : List[str] = lags_sequence
        __lowerCAmelCase : Any = embedding_dimension
        __lowerCAmelCase : Dict = is_training
        __lowerCAmelCase : Optional[int] = hidden_size
        __lowerCAmelCase : List[str] = num_hidden_layers
        __lowerCAmelCase : List[Any] = num_attention_heads
        __lowerCAmelCase : Tuple = intermediate_size
        __lowerCAmelCase : int = hidden_act
        __lowerCAmelCase : Optional[int] = hidden_dropout_prob
        __lowerCAmelCase : List[str] = attention_probs_dropout_prob
        __lowerCAmelCase : Optional[Any] = context_length
        __lowerCAmelCase : Any = prediction_length + label_length
        __lowerCAmelCase : Union[str, Any] = label_length
        __lowerCAmelCase : Any = moving_average
        __lowerCAmelCase : Optional[int] = autocorrelation_factor

    def UpperCamelCase__ ( self ) ->Dict:
        """Build a small ``AutoformerConfig`` from the stored hyper-parameters."""
        return AutoformerConfig(
            d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )

    def UpperCamelCase__ ( self , A_ ) ->Optional[int]:
        """Build the dict of synthetic encoder/decoder inputs for a given config."""
        __lowerCAmelCase : str = config.context_length + max(config.lags_sequence )
        __lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
        __lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
        __lowerCAmelCase : Dict = floats_tensor([self.batch_size, _past_length] )
        __lowerCAmelCase : Tuple = floats_tensor([self.batch_size, _past_length] ) > 0.5
        # decoder inputs
        __lowerCAmelCase : str = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
        __lowerCAmelCase : Tuple = floats_tensor([self.batch_size, config.prediction_length] )
        __lowerCAmelCase : Optional[int] = {
            '''past_values''': past_values,
            '''static_categorical_features''': static_categorical_features,
            '''past_time_features''': past_time_features,
            '''past_observed_mask''': past_observed_mask,
            '''future_time_features''': future_time_features,
            '''future_values''': future_values,
        }
        return inputs_dict

    def UpperCamelCase__ ( self ) ->Union[str, Any]:
        """Return (config, inputs_dict) built from ``get_config``."""
        __lowerCAmelCase : Optional[Any] = self.get_config()
        __lowerCAmelCase : Tuple = self.prepare_autoformer_inputs_dict(A_ )
        return config, inputs_dict

    def UpperCamelCase__ ( self ) ->Optional[Any]:
        """Alias used by the common test mixins."""
        __lowerCAmelCase : int = self.prepare_config_and_inputs()
        return config, inputs_dict

    def UpperCamelCase__ ( self , A_ , A_ ) ->List[str]:
        """Round-trip encoder and decoder through ``save_pretrained`` and compare outputs."""
        __lowerCAmelCase : Optional[int] = AutoformerModel(config=A_ ).to(A_ ).eval()
        __lowerCAmelCase : Optional[int] = model(**A_ )
        __lowerCAmelCase : Dict = outputs.encoder_last_hidden_state
        __lowerCAmelCase : int = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            __lowerCAmelCase : Tuple = model.get_encoder()
            encoder.save_pretrained(A_ )
            __lowerCAmelCase : List[Any] = AutoformerEncoder.from_pretrained(A_ ).to(A_ )
        __lowerCAmelCase : Any = model.create_network_inputs(**A_ )
        __lowerCAmelCase : Dict = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
        __lowerCAmelCase : Dict = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
        __lowerCAmelCase : Optional[Any] = encoder(inputs_embeds=A_ )[0]
        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
        __lowerCAmelCase : List[Any] = (
            torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
            .unsqueeze(1 )
            .repeat(1 , config.prediction_length , 1 )
        )
        __lowerCAmelCase : List[str] = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
        __lowerCAmelCase : Dict = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        __lowerCAmelCase : str = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        with tempfile.TemporaryDirectory() as tmpdirname:
            __lowerCAmelCase : Dict = model.get_decoder()
            decoder.save_pretrained(A_ )
            __lowerCAmelCase : Any = AutoformerDecoder.from_pretrained(A_ ).to(A_ )
        __lowerCAmelCase : List[str] = decoder(
            trend=A_ , inputs_embeds=A_ , encoder_hidden_states=A_ , )[0]
        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class __lowercase (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
    # NOTE(review): machine-obfuscated Autoformer test suite. The base-class
    # names ``_UpperCAmelCase`` are undefined, the repeated ``_UpperCamelCase``
    # class attributes overwrite one another, and method bodies assign to
    # ``__lowerCAmelCase`` while reading the original descriptive names (and
    # reference ``A_`` where no such parameter exists). Not runnable as
    # written; tokens preserved, only comments/docstrings added.
    _UpperCamelCase = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    _UpperCamelCase = (AutoformerForPrediction,) if is_torch_available() else ()
    _UpperCamelCase = {"""feature-extraction""": AutoformerModel} if is_torch_available() else {}
    _UpperCamelCase = False
    _UpperCamelCase = False
    _UpperCamelCase = False
    _UpperCamelCase = False
    _UpperCamelCase = False
    _UpperCamelCase = False

    def UpperCamelCase__ ( self ) ->List[Any]:
        """Create the model tester and the config tester."""
        __lowerCAmelCase : int = AutoformerModelTester(self )
        __lowerCAmelCase : Dict = ConfigTester(self , config_class=A_ , has_text_modality=A_ )

    def UpperCamelCase__ ( self ) ->Tuple:
        """Run the common configuration tests."""
        self.config_tester.run_common_tests()

    def UpperCamelCase__ ( self ) ->str:
        """Each model class must save and reload without missing keys."""
        __lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            __lowerCAmelCase : Optional[Any] = model_class(A_ )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(A_ )
                __lowerCAmelCase : Optional[Any] = model_class.from_pretrained(A_ , output_loading_info=A_ )
            self.assertEqual(info['''missing_keys'''] , [] )

    def UpperCamelCase__ ( self ) ->Optional[Any]:
        """Check the standalone encoder/decoder round-trip."""
        __lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*A_ )

    @unittest.skip(reason='''Model has no tokens embeddings''' )
    def UpperCamelCase__ ( self ) ->int:
        """Skipped: token-embedding resizing does not apply to this model."""
        pass

    def UpperCamelCase__ ( self ) ->Optional[int]:
        """``main_input_name`` must match the first forward argument."""
        __lowerCAmelCase : Union[str, Any] = inspect.signature(getattr(A_ , '''forward''' ) )
        # The main input is the name of the argument after `self`
        __lowerCAmelCase : Optional[Any] = list(model_signature.parameters.keys() )[1]
        self.assertEqual(AutoformerModel.main_input_name , A_ )

    def UpperCamelCase__ ( self ) ->Optional[int]:
        """The forward signature must expose the expected argument names, in order."""
        __lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowerCAmelCase : str = model_class(A_ )
            __lowerCAmelCase : List[str] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __lowerCAmelCase : Optional[Any] = [*signature.parameters.keys()]
            __lowerCAmelCase : str = [
                '''past_values''',
                '''past_time_features''',
                '''past_observed_mask''',
                '''static_categorical_features''',
                '''static_real_features''',
                '''future_values''',
                '''future_time_features''',
            ]
            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append('''future_observed_mask''' )
            expected_arg_names.extend(
                [
                    '''decoder_attention_mask''',
                    '''head_mask''',
                    '''decoder_head_mask''',
                    '''cross_attn_head_mask''',
                    '''encoder_outputs''',
                    '''past_key_values''',
                    '''output_hidden_states''',
                    '''output_attentions''',
                    '''use_cache''',
                    '''return_dict''',
                ] )
            self.assertListEqual(arg_names[: len(A_ )] , A_ )

    def UpperCamelCase__ ( self ) ->Union[str, Any]:
        """Attention tensors must have the expected count and shapes (encoder, decoder, cross)."""
        __lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
        __lowerCAmelCase : Any = True
        __lowerCAmelCase : Tuple = getattr(self.model_tester , '''seq_length''' , A_ )
        __lowerCAmelCase : Tuple = getattr(self.model_tester , '''decoder_seq_length''' , A_ )
        __lowerCAmelCase : Any = getattr(self.model_tester , '''encoder_seq_length''' , A_ )
        __lowerCAmelCase : List[str] = getattr(self.model_tester , '''d_model''' , A_ )
        __lowerCAmelCase : int = getattr(self.model_tester , '''num_attention_heads''' , A_ )
        __lowerCAmelCase : Union[str, Any] = d_model // num_attention_heads
        for model_class in self.all_model_classes:
            __lowerCAmelCase : Any = True
            __lowerCAmelCase : Union[str, Any] = False
            __lowerCAmelCase : Optional[int] = True
            __lowerCAmelCase : Optional[int] = model_class(A_ )
            model.to(A_ )
            model.eval()
            with torch.no_grad():
                __lowerCAmelCase : str = model(**self._prepare_for_class(A_ , A_ ) )
            __lowerCAmelCase : Optional[int] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            __lowerCAmelCase : Optional[int] = True
            __lowerCAmelCase : Union[str, Any] = model_class(A_ )
            model.to(A_ )
            model.eval()
            with torch.no_grad():
                __lowerCAmelCase : Any = model(**self._prepare_for_class(A_ , A_ ) )
            __lowerCAmelCase : Dict = outputs.encoder_attentions
            self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
            __lowerCAmelCase : Optional[Any] = len(A_ )
            __lowerCAmelCase : List[Any] = 7
            if "last_hidden_state" in outputs:
                correct_outlen += 1
            if "trend" in outputs:
                correct_outlen += 1
            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned
            if "loss" in outputs:
                correct_outlen += 1
            if "params" in outputs:
                correct_outlen += 1
            self.assertEqual(A_ , A_ )
            # decoder attentions
            __lowerCAmelCase : List[str] = outputs.decoder_attentions
            self.assertIsInstance(A_ , (list, tuple) )
            self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
            # cross attentions
            __lowerCAmelCase : Any = outputs.cross_attentions
            self.assertIsInstance(A_ , (list, tuple) )
            self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
        # Check attention is always last and order is fine
        __lowerCAmelCase : List[str] = True
        __lowerCAmelCase : List[Any] = True
        __lowerCAmelCase : Dict = model_class(A_ )
        model.to(A_ )
        model.eval()
        with torch.no_grad():
            __lowerCAmelCase : Optional[int] = model(**self._prepare_for_class(A_ , A_ ) )
        self.assertEqual(out_len + 2 , len(A_ ) )
        __lowerCAmelCase : Optional[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
        self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers )
        self.assertListEqual(
            list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )

    @is_flaky()
    def UpperCamelCase__ ( self ) ->List[str]:
        """Marked flaky upstream: delegate to the common retain-grad test."""
        super().test_retain_grad_hidden_states_attentions()
def _lowercase ( lowercase__="train-batch.pt" ):
__lowerCAmelCase : List[str] = hf_hub_download(repo_id='''hf-internal-testing/tourism-monthly-batch''' , filename=lowercase__ , repo_type='''dataset''' )
__lowerCAmelCase : Tuple = torch.load(lowercase__ , map_location=lowercase__ )
return batch
@require_torch
@slow
class __lowercase (unittest.TestCase ):
    # NOTE(review): obfuscated slow integration tests. ``A_`` is undefined
    # inside these zero-argument methods (it stood for the device, expected
    # shape/tensor and tolerance before obfuscation) and ``prepare_batch`` was
    # renamed to ``_lowercase`` above, so these methods are not runnable as
    # written. Tokens preserved; only comments/docstrings added.

    def UpperCamelCase__ ( self ) ->List[str]:
        """Compare AutoformerModel hidden states on a fixed batch with reference values."""
        __lowerCAmelCase : Union[str, Any] = AutoformerModel.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(A_ )
        __lowerCAmelCase : Tuple = prepare_batch()
        with torch.no_grad():
            __lowerCAmelCase : Optional[Any] = model(
                past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , future_values=batch['''future_values'''] , future_time_features=batch['''future_time_features'''] , )[0]
        __lowerCAmelCase : Dict = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
        self.assertEqual(output.shape , A_ )
        __lowerCAmelCase : Union[str, Any] = torch.tensor(
            [[0.3_593, -1.3_398, 0.6_330], [0.2_279, 1.5_396, -0.1_792], [0.0_450, 1.3_225, -0.2_335]] , device=A_ )
        self.assertTrue(torch.allclose(output[0, :3, :3] , A_ , atol=A_ ) )

    def UpperCamelCase__ ( self ) ->List[str]:
        """Compare the prediction model's encoder_last_hidden_state with reference values."""
        __lowerCAmelCase : Tuple = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(A_ )
        __lowerCAmelCase : Dict = prepare_batch('''val-batch.pt''' )
        with torch.no_grad():
            __lowerCAmelCase : List[Any] = model(
                past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , ).encoder_last_hidden_state
        __lowerCAmelCase : List[str] = torch.Size((64, model.config.context_length, model.config.d_model) )
        self.assertEqual(output.shape , A_ )
        __lowerCAmelCase : Any = torch.tensor(
            [[-0.0_734, -0.9_036, 0.8_358], [4.7_186, 2.4_113, 1.9_581], [1.7_953, 2.3_558, 1.2_970]] , device=A_ )
        self.assertTrue(torch.allclose(output[0, :3, :3] , A_ , atol=A_ ) )

    def UpperCamelCase__ ( self ) ->Tuple:
        """Exercise ``generate`` and compare the mean prediction with reference values."""
        __lowerCAmelCase : Dict = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(A_ )
        __lowerCAmelCase : Dict = prepare_batch('''val-batch.pt''' )
        with torch.no_grad():
            __lowerCAmelCase : Optional[int] = model.generate(
                static_categorical_features=batch['''static_categorical_features'''] , past_time_features=batch['''past_time_features'''] , past_values=batch['''past_values'''] , future_time_features=batch['''future_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , )
        __lowerCAmelCase : Optional[Any] = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
        self.assertEqual(outputs.sequences.shape , A_ )
        __lowerCAmelCase : Optional[Any] = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=A_ )
        __lowerCAmelCase : Tuple = outputs.sequences.mean(dim=1 )
        self.assertTrue(torch.allclose(mean_prediction[0, -3:] , A_ , rtol=1e-1 ) )
| 715 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Lazy-module import structure. NOTE(review): obfuscation collapsed distinct
# names onto ``_UpperCamelCase`` (the dict below is overwritten by the list in
# the ``else`` branch) and ``_import_structure`` passed to ``_LazyModule`` at
# the bottom is never defined -- the module is not runnable as written.
_UpperCamelCase = {
    "configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
    "tokenization_luke": ["LukeTokenizer"],
}

try:
    # Only register the modeling symbols when torch is installed.
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCamelCase = [
        "LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LukeForEntityClassification",
        "LukeForEntityPairClassification",
        "LukeForEntitySpanClassification",
        "LukeForMultipleChoice",
        "LukeForQuestionAnswering",
        "LukeForSequenceClassification",
        "LukeForTokenClassification",
        "LukeForMaskedLM",
        "LukeModel",
        "LukePreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
    from .tokenization_luke import LukeTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_luke import (
            LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
            LukeForEntityClassification,
            LukeForEntityPairClassification,
            LukeForEntitySpanClassification,
            LukeForMaskedLM,
            LukeForMultipleChoice,
            LukeForQuestionAnswering,
            LukeForSequenceClassification,
            LukeForTokenClassification,
            LukeModel,
            LukePreTrainedModel,
        )

else:
    # At runtime, replace this module with a lazy loader.
    import sys

    _UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 583 | 0 |
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def __magic_name__() -> Optional[int]:
    """Verify ``patch_submodule`` patches every access path to ``os.path.join``.

    Fixes the obfuscated original, which assigned the mock string to a
    throwaway name and then passed an undefined ``_A`` to ``patch_submodule``
    and compared against an undefined ``mock``.
    """
    import os as original_os
    from os import path as original_path
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join
    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join

    mock = '__test_patch_submodule_mock__'
    with patch_submodule(_test_patching, "os.path.join", mock):
        # Every way to access os.path.join must be patched, and the rest must stay untouched
        # check os.path.join
        assert isinstance(_test_patching.os, _PatchedModuleObj)
        assert isinstance(_test_patching.os.path, _PatchedModuleObj)
        assert _test_patching.os.path.join is mock
        # check path.join
        assert isinstance(_test_patching.path, _PatchedModuleObj)
        assert _test_patching.path.join is mock
        # check join
        assert _test_patching.join is mock
        # check that the other attributes are untouched
        assert _test_patching.os.rename is original_rename
        assert _test_patching.path.dirname is original_dirname
        assert _test_patching.os.path.dirname is original_dirname
        # Even renamed modules or objects must be patched
        # check renamed_os.path.join
        assert isinstance(_test_patching.renamed_os, _PatchedModuleObj)
        assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj)
        assert _test_patching.renamed_os.path.join is mock
        # check renamed_path.join
        assert isinstance(_test_patching.renamed_path, _PatchedModuleObj)
        assert _test_patching.renamed_path.join is mock
        # check renamed_join
        assert _test_patching.renamed_join is mock
        # check that the other attributes are untouched
        assert _test_patching.renamed_os.rename is original_rename
        assert _test_patching.renamed_path.dirname is original_dirname
        assert _test_patching.renamed_os.path.dirname is original_dirname

    # check that everything is back to normal when the patch is over
    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join
    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join
def __magic_name__() -> List[Any]:
    """Builtins such as ``open`` that appear in the module's globals must be patchable.

    Fixes the obfuscated original, which never bound ``mock`` and passed an
    undefined ``_A`` to ``patch_submodule``.
    """
    assert _test_patching.open is open

    mock = '__test_patch_submodule_builtin_mock__'
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open
def __magic_name__() -> Union[str, Any]:
    """Patching an attribute of a module that ``_test_patching`` never imports must not raise.

    Fixes the obfuscated original's undefined ``_A`` argument.
    """
    mock = '__test_patch_submodule_missing_mock__'
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass
def __magic_name__() -> Optional[Any]:
    """A builtin absent from the module globals is patched only inside the context.

    Fixes the obfuscated original: ``mock`` was never bound, and the
    ``getattr`` default should be ``None`` (not the undefined ``_A``).
    """
    mock = '__test_patch_submodule_missing_builtin_mock__'
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def __magic_name__() -> Dict:
    """``patch_submodule`` objects support explicit ``start()``/``stop()`` calls.

    Fixes the obfuscated original, where both the mock string and the patch
    object were assigned to throwaway names while the body used the undefined
    names ``patch`` and ``mock``.
    """
    mock = '__test_patch_submodule_start_and_stop_mock__'
    patch = patch_submodule(_test_patching, "open", mock)

    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open
def __magic_name__() -> str:
    """Nested patches compose and unwind correctly regardless of nesting order.

    Fixes the obfuscated original, which bound the three mock strings to
    throwaway names and passed an undefined ``_A`` to every
    ``patch_submodule`` call.
    """
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = '__test_patch_submodule_successive_join__'
    mock_dirname = '__test_patch_submodule_successive_dirname__'
    mock_rename = '__test_patch_submodule_successive_rename__'

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename
def __magic_name__() -> int:
    """Patching modules/attributes that do not exist anywhere must not raise.

    Fixes the obfuscated original's undefined ``_A`` argument (and strips
    dataset-marker garbage that was fused onto the final line).
    """
    mock = '__test_patch_submodule_doesnt_exist_mock__'
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
'''Module-level constants for the (obfuscated) Speech2Text tokenizer below.'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union

import sentencepiece

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging

lowerCAmelCase: Tuple = logging.get_logger(__name__)

# SentencePiece word-boundary marker character.
lowerCAmelCase: Dict = '▁'

# NOTE(review): obfuscation collapsed several distinct constants onto the one
# name ``lowerCAmelCase`` -- each assignment overwrites the previous one, and
# the final dict reads ``MUSTC_LANGS`` which no longer exists. Not runnable
# as written; tokens preserved, only comments/docstrings added.
lowerCAmelCase: Dict = {
    'vocab_file': 'vocab.json',
    'spm_file': 'sentencepiece.bpe.model',
}

lowerCAmelCase: int = {
    'vocab_file': {
        'facebook/s2t-small-librispeech-asr': (
            'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
        ),
    },
    'spm_file': {
        'facebook/s2t-small-librispeech-asr': (
            'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
        )
    },
}

lowerCAmelCase: Any = {
    'facebook/s2t-small-librispeech-asr': 1_0_2_4,
}

# MuST-C language codes (originally MUSTC_LANGS).
lowerCAmelCase: Optional[int] = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de']
lowerCAmelCase: List[Any] = {'mustc': MUSTC_LANGS}
class a__( lowerCamelCase__ ):
    # NOTE(review): machine-obfuscated Speech2Text tokenizer. The class
    # attributes reference names that the obfuscation renamed away
    # (VOCAB_FILES_NAMES etc.), ``__init__`` repeats the parameter name
    # ``__snake_case`` (a SyntaxError), and method bodies assign to the
    # throwaway name ``a`` while reading the original descriptive names.
    # Not runnable as written; tokens preserved, only comments/docstrings added.
    lowercase__ = VOCAB_FILES_NAMES
    lowercase__ = PRETRAINED_VOCAB_FILES_MAP
    lowercase__ = MAX_MODEL_INPUT_SIZES
    lowercase__ = ["""input_ids""", """attention_mask"""]
    lowercase__ = []

    def __init__( self : int , __snake_case : int , __snake_case : Optional[Any] , __snake_case : Optional[Any]="<s>" , __snake_case : Optional[int]="</s>" , __snake_case : Union[str, Any]="<pad>" , __snake_case : str="<unk>" , __snake_case : Dict=False , __snake_case : int=False , __snake_case : str=None , __snake_case : Optional[int]=None , __snake_case : Optional[Dict[str, Any]] = None , **__snake_case : Union[str, Any] , ):
        """Initialise the tokenizer from a vocab json and a SentencePiece model."""
        a : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , pad_token=__snake_case , do_upper_case=__snake_case , do_lower_case=__snake_case , tgt_lang=__snake_case , lang_codes=__snake_case , sp_model_kwargs=self.sp_model_kwargs , **__snake_case , )
        a : Tuple = do_upper_case
        a : Optional[Any] = do_lower_case
        a : List[str] = load_json(__snake_case )
        a : Dict = {v: k for k, v in self.encoder.items()}
        a : int = spm_file
        a : Tuple = load_spm(__snake_case , self.sp_model_kwargs )
        if lang_codes is not None:
            a : Any = lang_codes
            a : str = LANGUAGES[lang_codes]
            a : Tuple = [F"""<lang:{lang}>""" for lang in self.langs]
            a : str = {lang: self.sp_model.PieceToId(F"""<lang:{lang}>""" ) for lang in self.langs}
            a : Optional[Any] = self.lang_tokens
            a : Union[str, Any] = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang )
        else:
            a : List[str] = {}

    @property
    def lowercase_ ( self : Optional[Any] ):
        """Vocabulary size (number of entries in the encoder)."""
        return len(self.encoder )

    @property
    def lowercase_ ( self : int ):
        """Currently selected target language."""
        return self._tgt_lang

    @tgt_lang.setter
    def lowercase_ ( self : int , __snake_case : Optional[int] ):
        """Set the target language and refresh its special tokens."""
        a : Union[str, Any] = new_tgt_lang
        self.set_tgt_lang_special_tokens(__snake_case )

    def lowercase_ ( self : str , __snake_case : str ):
        """Use the language-code token of ``tgt_lang`` as the generation prefix."""
        a : int = self.lang_code_to_id[tgt_lang]
        a : int = [lang_code_id]

    def lowercase_ ( self : Optional[int] , __snake_case : str ):
        """Tokenize text with the SentencePiece model."""
        return self.sp_model.encode(__snake_case , out_type=__snake_case )

    def lowercase_ ( self : List[str] , __snake_case : List[Any] ):
        """Convert a token string to its id (unk id when unknown)."""
        return self.encoder.get(__snake_case , self.encoder[self.unk_token] )

    def lowercase_ ( self : List[Any] , __snake_case : int ):
        """Convert an id back to its token string (unk token when unknown)."""
        return self.decoder.get(__snake_case , self.unk_token )

    def lowercase_ ( self : Union[str, Any] , __snake_case : List[str] ):
        """Join tokens into a string, decoding special tokens outside SentencePiece."""
        a : List[Any] = []
        a : List[Any] = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                a : Union[str, Any] = self.sp_model.decode(__snake_case )
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                a : Optional[int] = []
            else:
                current_sub_tokens.append(__snake_case )
        a : Tuple = self.sp_model.decode(__snake_case )
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()

    def lowercase_ ( self : int , __snake_case : List[Any] , __snake_case : List[str]=None ):
        """Build model inputs: prefix tokens + sequence(s) + EOS."""
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]

    def lowercase_ ( self : Optional[int] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None , __snake_case : bool = False ):
        """Mask marking special tokens (1) versus sequence tokens (0)."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case )
        a : Optional[int] = [1] * len(self.prefix_tokens )
        a : Any = [1]
        if token_ids_a is None:
            return prefix_ones + ([0] * len(__snake_case )) + suffix_ones
        return prefix_ones + ([0] * len(__snake_case )) + ([0] * len(__snake_case )) + suffix_ones

    def lowercase_ ( self : Union[str, Any] ):
        """Return the full vocabulary (encoder plus added tokens)."""
        a : Union[str, Any] = self.encoder.copy()
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self : Any ):
        """Drop the unpicklable SentencePiece processor from the pickled state."""
        a : List[str] = self.__dict__.copy()
        a : Union[str, Any] = None
        return state

    def __setstate__( self : str , __snake_case : Dict ):
        """Restore pickled state and reload the SentencePiece model."""
        a : Dict = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            a : int = {}
        a : Any = load_spm(self.spm_file , self.sp_model_kwargs )

    def lowercase_ ( self : str , __snake_case : str , __snake_case : Optional[str] = None ):
        """Save the vocab json and the SentencePiece model into a directory."""
        a : Union[str, Any] = Path(__snake_case )
        assert save_dir.is_dir(), F"""{save_directory} should be a directory"""
        a : Any = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
        )
        a : List[Any] = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
        )
        save_json(self.encoder , __snake_case )
        if os.path.abspath(self.spm_file ) != os.path.abspath(__snake_case ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file , __snake_case )
        elif not os.path.isfile(self.spm_file ):
            with open(__snake_case , 'wb' ) as fi:
                a : Tuple = self.sp_model.serialized_model_proto()
                fi.write(__snake_case )
        return (str(__snake_case ), str(__snake_case ))
def lowerCamelCase__(_A, sp_model_kwargs):
    """Load a SentencePiece processor from the model file at path ``_A``.

    Fixes the obfuscated original, which declared ``_A`` twice (a
    SyntaxError) and called ``Load`` on / returned an undefined ``spm``.

    Args:
        _A: path to the ``.model`` SentencePiece file.
        sp_model_kwargs: kwargs forwarded to ``SentencePieceProcessor``.
    """
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(_A))
    return spm
def lowerCamelCase__(_A):
    """Read the JSON file at path ``_A`` and return the parsed object.

    Fixes the obfuscated original, which opened the file but then passed the
    *path string* (not the file handle) to ``json.load``.
    """
    with open(_A, 'r') as f:
        return json.load(f)
def lowerCamelCase__(_A, path):
    """Serialise ``_A`` as pretty-printed JSON to ``path``.

    Fixes the obfuscated original, which repeated the parameter name (a
    SyntaxError) and passed the data object as both ``json.dump`` arguments
    (and carried fused dataset-marker garbage on its last line).
    """
    with open(path, 'w') as f:
        json.dump(_A, f, indent=2)
def __lowerCAmelCase(number: int, shift_amount: int) -> str:
    """Logical left shift: append ``shift_amount`` zero bits to ``number``.

    Returns the result as a ``0b``-prefixed binary string.
    Raises ValueError when either argument is negative.

    Fixes the obfuscated original, which repeated its parameter name (a
    SyntaxError) and assigned the binary string to a name it never read.
    """
    if number < 0 or shift_amount < 0:
        raise ValueError("""both inputs must be positive integers""")
    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number
def __lowerCAmelCase(number: int, shift_amount: int) -> str:
    """Logical right shift: drop the lowest ``shift_amount`` bits of ``number``.

    Returns ``"0b0"`` when every bit is shifted out.
    Raises ValueError when either argument is negative.

    Fixes the obfuscated original, which repeated its parameter name (a
    SyntaxError) and read names it never assigned.
    """
    if number < 0 or shift_amount < 0:
        raise ValueError("""both inputs must be positive integers""")
    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number
def __lowerCAmelCase(number: int, shift_amount: int) -> str:
    """Arithmetic (sign-extending) right shift on a two's-complement bit string.

    Negative numbers are rendered in two's complement and the sign bit is
    replicated into the positions vacated by the shift.

    Fixes the obfuscated original, which repeated its parameter name (a
    SyntaxError) and assigned to names different from the ones it read.
    """
    if number >= 0:  # Get binary representation of positive number
        binary_number = """0""" + str(bin(number)).strip("""-""")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            """1""" + """0""" * (binary_number_length - len(binary_number)) + binary_number
        )
    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 103 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)

# Tokenizer resource file names and download locations for MobileBERT.
# The class body below references these constants by these exact names; the
# previous version assigned them all to one repeatedly-overwritten variable.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}

PRETRAINED_INIT_CONFIGURATION = {}
class __A(PreTrainedTokenizerFast):
    """Fast MobileBERT tokenizer (backed by the HuggingFace *tokenizers*
    library), based on WordPiece. Mirrors ``BertTokenizerFast``.

    NOTE(review): the class attributes and method names follow the
    ``PreTrainedTokenizerFast`` contract; the previous version collapsed them
    all onto single shadowed identifiers and used duplicate parameter names,
    which is a SyntaxError.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if the saved tokenizer's options differ
        # from the ones requested here (same pattern as BertTokenizerFast).
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model input as ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return token-type ids: 0 for the first sequence, 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer's model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 103 | 1 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
# Repository root (three directories up from this test file). The next line
# references `git_repo_path` by name, so the assignment must use that name —
# the previous version bound it to a variable that was later overwritten.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies  # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"
class __a(unittest.TestCase):
    """Tests for the `check_copies` repository utility, using
    DDPMSchedulerOutput as the reference snippet.

    NOTE(review): method names reconstructed to the unittest contract
    (setUp/tearDown/test_*) — the previous version gave every method the same
    name, so none of them ever ran; verify against the upstream test file.
    """

    def setUp(self):
        """Create a scratch `diffusers` tree containing scheduling_ddpm.py."""
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        # Point the checker at the scratch tree instead of the real source tree.
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        """Restore the checker's default path and remove the scratch tree."""
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        """Write a candidate copy to disk and assert it is (or can be made) consistent."""
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_copy_consistency(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
| 109 |
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('''.''')
def get_module_path(test_file):
    """Convert a test file path into an importable dotted module path.

    Example: ``tests/models/bert/test_modeling_bert.py`` ->
    ``tests.models.bert.test_modeling_bert``.

    Raises:
        ValueError: if the path is not a ``tests/models/test_modeling_*.py`` file.
    """
    # The sibling helpers below call this function by this name.
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead.")
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.")
    components = components[:-1] + [test_fn.replace(".py", "")]
    return ".".join(components)
def get_test_module(test_file):
    """Import and return the test module corresponding to *test_file*."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module
def get_tester_classes(test_file):
    """Return all ``*ModelTester`` classes defined in *test_file*'s module."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    # (the original lambda referenced an undefined variable and raised NameError)
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_classes(test_file):
    """Return all model test classes (those with a non-empty ``all_model_classes``)."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)
def get_model_classes(test_file):
    """Return the union of all model classes covered by the test classes in *test_file*."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)
def get_model_tester_from_test_class(test_class):
    """Instantiate *test_class*, run its ``setUp``, and return the class of its
    ``model_tester`` attribute (or ``None`` if absent/unset)."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester
def get_test_classes_for_model(test_file, model_class):
    """Return the test classes in *test_file* that cover *model_class*."""
    # The original declared two parameters with the same name — a SyntaxError.
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)
def get_tester_classes_for_model(test_file, model_class):
    """Return the model-tester classes behind the test classes covering *model_class*."""
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_to_tester_mapping(test_file):
    """Map each test class in *test_file* to its model-tester class."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping
def get_model_to_test_mapping(test_file):
    """Map each model class in *test_file* to the test classes covering it."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping
def get_model_to_tester_mapping(test_file):
    """Map each model class in *test_file* to the tester classes covering it."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping
def to_json(o):
    """Recursively convert *o* into a JSON-serializable structure.

    Strings pass through, classes become their names, lists/tuples and dicts
    are converted element-wise, and anything else is returned unchanged.
    """
    # The original collapsed the isinstance targets and comprehension variables
    # into a single name, so it compared `o` against itself and recursed on the
    # wrong values.
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
| 682 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Checkpoint -> config URL map for the published RWKV models.
# The previous version assigned the logger and this map to the same name,
# overwriting the logger.
RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class __UpperCamelCase(PretrainedConfig):
    """Configuration for RWKV models.

    The ``__init__`` body referenced the real parameter names (``vocab_size``,
    ``context_length``, ...) while the signature declared duplicates — a
    SyntaxError — so the signature is restored to those names, with defaults
    preserved from the original.
    """

    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # Fallbacks: attention hidden size defaults to hidden_size, FFN to 4x.
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
| 718 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# Tokenizer resource names and download locations for SpeechT5. The class
# below references these constants (and `logger`) by these exact names.
VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
        "microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
        "microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/speecht5_asr": 1024,
    "microsoft/speecht5_tts": 1024,
    "microsoft/speecht5_vc": 1024,
}
class __UpperCamelCase(PreTrainedTokenizer):
    """SpeechT5 tokenizer backed by a character-level SentencePiece model.

    NOTE(review): method names restored to the ``PreTrainedTokenizer`` hook
    contract (``_tokenize``, ``_convert_token_to_id``, ...) — the previous
    version named every method ``_a``, so only the last definition survived.
    Duplicate parameter names (a SyntaxError) are also fixed.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and rebuild on load.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Join sentencepiece tokens back into a string, keeping special tokens verbatim."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 653 | 0 |
import os
import pytest
from transformers.dynamic_module_utils import get_imports
# Source snippets exercising `get_imports`; each should report only `os` as a
# hard requirement. The list below referenced these constants by name while
# the previous version bound them all to one overwritten variable.
TOP_LEVEL_IMPORT = "\nimport os\n"
IMPORT_IN_FUNCTION = "\ndef foo():\n import os\n return False\n"
DEEPLY_NESTED_IMPORT = "\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n"
TOP_LEVEL_TRY_IMPORT = "\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n"
TRY_IMPORT_IN_FUNCTION = "\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n"
MULTIPLE_EXCEPTS_IMPORT = "\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n"
EXCEPT_AS_IMPORT = "\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n"
GENERIC_EXCEPT_IMPORT = "\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n"
MULTILINE_TRY_IMPORT = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n"
MULTILINE_BOTH_IMPORT = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n"

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    """Each case snippet should parse to exactly one required import: `os`.

    The name is restored to the pytest `test_*` convention so the test is
    actually collected; the previous version also declared duplicate parameter
    names (a SyntaxError) and parametrized over an undefined variable.
    """
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
| 637 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
# The conversion relies on the fairseq >= 1.0.0a module layout.
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
    raise Exception("requires fairseq >= 1.0.0a")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Sample sentence used to compare the converted model against the original.
SAMPLE_TEXT = "Hello world! cécé herlolip"
def convert_xlm_roberta_xl_checkpoint_to_pytorch(
    roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    """Convert a fairseq XLM-RoBERTa-XL checkpoint to the HF format and verify it.

    NOTE(review): the previous version declared three identically-named
    parameters (a SyntaxError) and collapsed every weight-copy target into one
    throwaway local, so no weights were actually transferred. The assignment
    targets below are reconstructed from the upstream transformers conversion
    script (convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py)
    — verify against it before relying on this.
    """
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings, hidden_size=roberta.cfg.model.encoder_embed_dim, num_hidden_layers=roberta.cfg.model.encoder_layers, num_attention_heads=roberta.cfg.model.encoder_attention_heads, intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim, max_position_embeddings=5_1_4, type_vocab_size=1, layer_norm_eps=1E-5, )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size) )
        )

        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
        intermediate.dense.weight = roberta_layer.fca.weight
        intermediate.dense.bias = roberta_layer.fca.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
        bert_output.dense.weight = roberta_layer.fca.weight
        bert_output.dense.bias = roberta_layer.fca.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1E-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # Command-line entry point. The previous version assigned the parser and
    # parsed args to throwaway names while the following lines referenced
    # `parser` / `args`, which were undefined.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    args = parser.parse_args()
    convert_xlm_roberta_xl_checkpoint_to_pytorch(
        args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
| 637 | 1 |
from __future__ import annotations
def average(nums):
    """Return the arithmetic mean of the numbers in *nums*.

    Raises:
        ValueError: if *nums* is empty.
    """
    # The original body referenced `nums` while the parameter had a mangled
    # private name, raising NameError; the signature is restored.
    if not nums:
        raise ValueError('List is empty')
    return sum(nums) / len(nums)
if __name__ == "__main__":
    # Self-check: run any doctest examples defined in this module.
    import doctest
    doctest.testmod()
| 297 |
def gray_code(bit_count):
    """Return the Gray code sequence for *bit_count* bits as a list of ints.

    Raises:
        ValueError: if *bit_count* is negative.
    """
    # The original body referenced `bit_count` while the parameter had a
    # mangled name, and the int conversion was assigned to a throwaway local
    # instead of back into the list; both are fixed.
    if bit_count < 0:
        raise ValueError('The given input must be positive')

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    # convert the binary strings to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence
def gray_code_sequence_string(bit_count):
    """Return the Gray code sequence for *bit_count* bits as binary strings.

    The name matches the recursive call inside this function (and the caller
    above); under the previous mangled name that call raised NameError.
    """
    # The approach is a recursive one:
    # base cases for 0 and 1 bit, then build the n-bit sequence by prefixing
    # the (n-1)-bit sequence with '0' and its reverse with '1'.
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence (2^n)
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []
    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])
    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])

    return sequence
if __name__ == "__main__":
    # Self-check: run any doctest examples defined in this module.
    import doctest
    doctest.testmod()
| 297 | 1 |
'''simple docstring'''
from __future__ import annotations
# Module metadata. The previous version bound all six values to the same
# repeatedly-overwritten variable; the conventional dunder names are restored.
__author__ = 'Muhammad Umer Farooq'
__license__ = 'MIT'
__version__ = '1.0.0'
__maintainer__ = 'Muhammad Umer Farooq'
__email__ = '[email protected]'
__status__ = 'Alpha'
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class a__(HTMLParser):
    """HTML parser that collects absolute anchor URLs relative to *domain*.

    The base class and the ``handle_starttag`` hook name are restored — the
    previous version inherited from an unrelated name and renamed the hook, so
    ``feed`` never invoked it; its body also referenced ``tag``/``attrs``
    while the signature declared duplicate mangled parameters (a SyntaxError).
    """

    def __init__(self, domain: str):
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)
def get_domain_name(url: str) -> str:
    """Return the last two labels of *url*'s host, e.g. 'github.com'.

    Named to match the call in `emails_from_url` below.
    """
    return ".".join(get_sub_domain_name(url).split('.')[-2:])
def get_sub_domain_name(url: str) -> str:
    """Return the network location (host[:port]) of *url*.

    Named to match the call in `get_domain_name` above.
    """
    return parse.urlparse(url).netloc
def emails_from_url(url: str = "https://github.com") -> list[str]:
    """Crawl *url*, follow its anchors once, and return the e-mail addresses found.

    Named to match the call in the `__main__` guard below; the helper and
    parser references are fixed to the names actually defined in this module.
    """
    domain = get_domain_name(url)

    # Initialize the parser
    parser = a__(domain)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall('[a-zA-Z0-9]+@' + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)
if __name__ == "__main__":
    # Demo run: crawl the landing page and print any addresses found.
    # The result must be bound to `emails`, which the print statements use —
    # the previous version assigned it to a different, unused name.
    emails = emails_from_url('https://github.com')
    print(f"{len(emails)} emails found:")
    print('\n'.join(sorted(emails)))
"""Repository hygiene script: flag tracked file paths that contain uppercase
letters, spaces, or hyphens, or that live outside any directory. Exits with a
non-zero status equal to the number of offending files.

Every variable below is referenced by name one line after it is assigned; the
previous version bound them all to a single overwritten name, raising
NameError on the first reference.
"""
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print('\n'.join(upper_files) + '\n')

space_files = [file for file in filepaths if ' ' in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print('\n'.join(space_files) + '\n')

hyphen_files = [file for file in filepaths if '-' in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print('\n'.join(hyphen_files) + '\n')

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print('\n'.join(nodir_files) + '\n')

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)):
    """Create a 2nd-order low-pass IIR filter (RBJ Audio-EQ cookbook).

    The parameter names match the references in the body — the previous
    signature declared three identically-named parameters (a SyntaxError).
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """
    Create a 2nd-order high-pass biquad filter (RBJ Audio EQ Cookbook formulas).

    :param frequency: cutoff frequency in Hz
    :param samplerate: sampling rate in Hz
    :param q_factor: filter Q; defaults to 1/sqrt(2) (Butterworth response)
    :return: configured ``IIRFilter`` instance
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    # b2 == b0 for the high-pass prototype, hence b0 appears twice.
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """
    Create a 2nd-order band-pass biquad filter (constant skirt gain, peak
    gain = Q; RBJ Audio EQ Cookbook formulas).

    :param frequency: center frequency in Hz
    :param samplerate: sampling rate in Hz
    :param q_factor: filter Q; defaults to 1/sqrt(2)
    :return: configured ``IIRFilter`` instance
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """
    Create a 2nd-order all-pass biquad filter (RBJ Audio EQ Cookbook formulas).

    :param frequency: center frequency in Hz
    :param samplerate: sampling rate in Hz
    :param q_factor: filter Q; defaults to 1/sqrt(2)
    :return: configured ``IIRFilter`` instance
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    # For an all-pass the a-coefficients are the b-coefficients reversed.
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt
def make_peak(
    frequency: int,
    samplerate: int,
    gain_db: float,
    q_factor: float = 1 / sqrt(2),
) -> IIRFilter:
    """
    Create a 2nd-order peaking-EQ biquad filter (RBJ Audio EQ Cookbook formulas).

    :param frequency: center frequency in Hz
    :param samplerate: sampling rate in Hz
    :param gain_db: peak gain in decibels (negative for a cut)
    :param q_factor: filter Q; defaults to 1/sqrt(2)
    :return: configured ``IIRFilter`` instance
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)  # amplitude ratio from the dB gain

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_lowshelf(
    frequency: int,
    samplerate: int,
    gain_db: float,
    q_factor: float = 1 / sqrt(2),
) -> IIRFilter:
    """
    Create a 2nd-order low-shelf biquad filter (RBJ Audio EQ Cookbook formulas).

    :param frequency: shelf transition frequency in Hz
    :param samplerate: sampling rate in Hz
    :param gain_db: shelf gain in decibels (negative for a cut)
    :param q_factor: filter Q; defaults to 1/sqrt(2)
    :return: configured ``IIRFilter`` instance
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)  # amplitude ratio from the dB gain
    # Cookbook sub-expressions: (A±1) ∓/± (A∓1)·cos(w0)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)
    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_highshelf(
    frequency: int,
    samplerate: int,
    gain_db: float,
    q_factor: float = 1 / sqrt(2),
) -> IIRFilter:
    """
    Create a 2nd-order high-shelf biquad filter (RBJ Audio EQ Cookbook formulas).

    :param frequency: shelf transition frequency in Hz
    :param samplerate: sampling rate in Hz
    :param gain_db: shelf gain in decibels (negative for a cut)
    :param q_factor: filter Q; defaults to 1/sqrt(2)
    :return: configured ``IIRFilter`` instance
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)  # amplitude ratio from the dB gain
    # Cookbook sub-expressions: (A±1) ∓/± (A∓1)·cos(w0)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)
    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
| 539 |
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class TFGPT2Tokenizer(tf.keras.layers.Layer):
    """In-graph GPT-2 BPE tokenizer implemented as a Keras layer, so that
    tokenization can be baked into a saved TF model."""

    def __init__(self, vocab, merges, max_length=None, pad_token_id=None):
        """
        Args:
            vocab: token-string -> id vocabulary used by the BPE tokenizer.
            merges: list of BPE merge rules (each rule a space-joined pair).
            max_length: optional sequence length for truncation/padding.
            pad_token_id: optional id used to pad sequences up to ``max_length``.
        """
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer, *args, **kwargs):
        """Build the layer from an existing ``GPTaTokenizer`` instance."""
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *init_inputs, **kwargs):
        """Build the layer from a pretrained checkpoint name or path."""
        tokenizer = GPTaTokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        """Re-create the layer from a ``get_config`` dict (Keras serialization)."""
        return cls(**config)

    def get_config(self):
        # Everything needed by ``from_config`` to rebuild this layer.
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length=None):
        """Tokenize ``x`` and return ``input_ids`` plus an ``attention_mask``."""
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 539 | 1 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.floating, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """
    Calculate the Euclidean distance between two vectors, using NumPy.

    >>> float(euclidean_distance([0, 0], [2, 2]))
    2.8284271247461903
    """
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """
    Calculate the Euclidean distance between two vectors, in pure Python.

    Fixes a bug introduced by identifier mangling: the squared difference was
    written as ``(va - va) ** 2`` — identically zero — so the function always
    returned 0.0.

    >>> euclidean_distance_no_np([0, 0], [2, 2])
    2.8284271247461903
    """
    return sum((va - vb) ** 2 for va, vb in zip(vector_1, vector_2)) ** (1 / 2)
if __name__ == "__main__":

    def benchmark() -> None:
        """Compare the pure-Python and NumPy implementations with timeit.

        (Fixes a NameError: the function was defined under a mangled name but
        invoked as ``benchmark()``.)
        """
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])",
                number=10000,
                globals=globals(),
            )
        )
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])",
                number=10000,
                globals=globals(),
            )
        )

    benchmark()
| 23 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    """Test suite for the PNDM scheduler (pseudo numerical methods for
    diffusion models): save/load round-trips, output shapes and full
    denoising-loop regression values."""

    # Scheduler classes the common harness iterates over, and the default
    # keyword arguments it forwards to step functions.
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        """Return the default scheduler config with ``kwargs`` overrides applied."""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        """Save/reload a scheduler built with ``config`` and assert that
        ``step_prk``/``step_plms`` outputs are unchanged."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        # Covered by check_over_configs; the common-test version does not apply here.
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        """Same round-trip check as ``check_over_configs`` but with the default
        scheduler config, exercised for a given ``time_step``."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        """Run a complete PRK + PLMS denoising loop and return the final sample."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        # Regression test: earlier set_timesteps() versions failed when the
        # number of inference steps was a power of 3.
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        # step_plms requires past residuals from the PRK warm-up phase.
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify a different beta start so that the first alpha is 0.99.
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify a different beta start so that the first alpha is 0.99.
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
| 464 | 0 |
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    """Test suite for the UnCLIP scheduler: config sweeps, variance values
    and full denoising-loop regression checks."""

    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        """Return the default scheduler config with ``kwargs`` overrides applied."""
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                # prev_timestep must strictly precede time_step (or be None).
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue

                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    # NOTE(review): the two no-op overrides below disable common-harness tests
    # that do not apply to UnCLIPScheduler; names restored from upstream — confirm.
    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
| 715 |
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.array) -> np.array:
    """
    Element-wise logistic sigmoid: 1 / (1 + exp(-x)).

    :param vector: array of real numbers
    :return: array of the same shape with values in (0, 1)
    """
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.array) -> np.array:
    """
    Sigmoid Linear Unit (SiLU / "quick GELU" approximation):
    x * sigmoid(1.702 * x).

    (Fixes a NameError: the mangled source called ``sigmoid`` while the
    function above carried a different name.)

    :param vector: array of real numbers
    :return: array of the same shape
    """
    return vector * sigmoid(1.702 * vector)
if __name__ == "__main__":
    # Run the doctests embedded in this module's docstrings when executed directly.
    import doctest

    doctest.testmod()
| 562 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    """Helper that stores image-processor settings for the tests below and
    computes the expected output resolutions."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the image processor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) the processor should produce for
        ``image_inputs`` when ``do_resize`` is True with a scalar size."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            # Scale so the shorter side equals size["shortest_edge"],
            # preserving aspect ratio.
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]

        else:
            # For a batch, padding makes every image as large as the largest one.
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for ``ConditionalDetrImageProcessor``: attribute checks and calls
    with PIL, NumPy and PyTorch inputs, plus slow integration tests with real
    COCO annotations."""

    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 20 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class lowerCAmelCase_ ( BackboneConfigMixin, PretrainedConfig ):
    """Configuration for the NAT (Neighborhood Attention Transformer) backbone.

    Restored from a mechanically scrambled original: the two distinct base
    classes (the original listed the same name twice, a TypeError at class
    creation), distinct ``__init__`` parameter names (the original repeated a
    single identifier, a SyntaxError), ``self.`` attribute assignments (the
    original bound every value to a throwaway local, so the config stored
    nothing), and the ``model_type`` / ``attribute_map`` class attributes that
    ``PretrainedConfig`` machinery reads (the original assigned both to the
    same name, clobbering the first).
    """

    model_type = "nat"

    # PretrainedConfig uses this to translate generic attribute names.
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        """Build the config; defaults correspond to shi-labs/nat-mini-in1k-224."""
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
# --- stray non-Python table row (was: "| 582 | 0 |"); commented out so the file stays parseable ---
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    """Sort ``a[start:end + 1]`` in place with randomised quicksort.

    Returns the number of element comparisons performed.  Restored from the
    scrambled original, which repeated a single parameter name (a SyntaxError)
    and called ``_in_place_quick_sort`` / ``_in_place_partition`` without those
    names ever being defined.
    """
    count = 0
    if start < end:
        # Move a randomly chosen pivot to the end before partitioning.
        pivot = randint(start, end)
        a[end], a[pivot] = a[pivot], a[end]
        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    """Lomuto-style partition of ``a[start:end + 1]`` around a random pivot.

    Returns ``(pivot_index, comparison_count)``; on return every element left
    of ``pivot_index`` is smaller than ``a[pivot_index]`` and every element to
    its right is greater or equal.
    """
    count = 0
    pivot = randint(start, end)
    a[end], a[pivot] = a[pivot], a[end]
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # current value belongs left of the pivot
            new_pivot_index += 1
            a[new_pivot_index], a[index] = a[index], a[new_pivot_index]
    # Put the pivot into its final slot.
    a[new_pivot_index + 1], a[end] = a[end], a[new_pivot_index + 1]
    return new_pivot_index + 1, count
# Demo driver: sort 100 normally-distributed samples and report comparisons.
# Restored variable names -- the scrambled original assigned everything to one
# module-level name while referencing mu/sigma/p/X/outfile/M/r, all undefined.
outfile = TemporaryFile()
p = 100  # number of elements to be sorted
mu, sigma = 0, 1  # mean and standard deviation of the normal distribution
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # rewind so the same array can be re-loaded
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
    "No of Comparisons for 100 elements selected from a standard normal distribution"
    "is :"
)
print(z)
# --- stray non-Python table row (was: "| 489 |"); commented out so the file stays parseable ---
'''simple docstring'''
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def __a(SCREAMING_SNAKE_CASE_):
    """Return the argument unchanged if it is already iterable, otherwise
    duplicate it into a 2-tuple (the usual HF ``to_2tuple`` helper).

    Fixes the original body, which returned an undefined name ``x``.
    Note: strings are iterable and are therefore returned as-is.
    """
    if isinstance(SCREAMING_SNAKE_CASE_, collections.abc.Iterable):
        return SCREAMING_SNAKE_CASE_
    return (SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_)
@require_tf
class lowerCAmelCase_ :
    """Shared mixin exercising TFVisionTextDualEncoderModel combinations.

    NOTE(review): this class is a mechanically scrambled transcription.
    Several signatures repeat the parameter name ``_lowerCAmelCase`` (a
    SyntaxError in Python), tuple results are unpacked into the SAME local
    name (keeping only the last element), and bodies reference names such as
    ``model`` / ``output`` / ``config`` that were never bound.  The original,
    distinct identifiers must be restored before this module can run; the
    comments below describe the evident intent of each method.
    """

    # Hook: subclasses build and return (vision_model, text_model) from configs.
    def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> str:
        pass
    # Hook: subclasses return a full kwargs dict of configs and model inputs.
    def _snake_case ( self ) -> Dict:
        pass
    # Hook: subclasses return a pretrained dual-encoder model plus inputs.
    def _snake_case ( self ) -> int:
        pass
    # Builds the dual encoder from the two configs and checks embedding shapes.
    def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , **_lowerCAmelCase ) -> Tuple:
        _lowerCAmelCase = VisionTextDualEncoderConfig.from_vision_text_configs(_lowerCAmelCase , _lowerCAmelCase )
        _lowerCAmelCase = TFVisionTextDualEncoderModel(_lowerCAmelCase )
        _lowerCAmelCase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
        self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) )
        self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) )
    # Builds the dual encoder from instantiated sub-models and checks shapes.
    def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , **_lowerCAmelCase ) -> Tuple:
        _lowerCAmelCase , _lowerCAmelCase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
        _lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
        _lowerCAmelCase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
        self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )
    # Builds via from_vision_text_pretrained(**kwargs) and checks shapes.
    def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , **_lowerCAmelCase ) -> Tuple:
        _lowerCAmelCase , _lowerCAmelCase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
        _lowerCAmelCase = {"vision_model": vision_model, "text_model": text_model}
        _lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowerCAmelCase )
        _lowerCAmelCase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
        self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )
    # Round-trips the model through save_pretrained/from_pretrained and
    # checks the outputs agree to within 1e-5.
    def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , **_lowerCAmelCase ) -> Optional[int]:
        _lowerCAmelCase , _lowerCAmelCase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
        _lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
        _lowerCAmelCase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
        _lowerCAmelCase = output[0].numpy()
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(_lowerCAmelCase )
            _lowerCAmelCase = TFVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase )
            _lowerCAmelCase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
            _lowerCAmelCase = after_output[0].numpy()
            _lowerCAmelCase = np.amax(np.abs(out_a - out_a ) )
            self.assertLessEqual(_lowerCAmelCase , 1E-5 )
    # Runs with output_attentions=True and checks the attention tensor shapes
    # for both towers (ViT sequence length = num_patches + 1 for [CLS]).
    def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , **_lowerCAmelCase ) -> Dict:
        _lowerCAmelCase , _lowerCAmelCase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
        _lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
        _lowerCAmelCase = model(
            input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase )
        _lowerCAmelCase = output.vision_model_output.attentions
        self.assertEqual(len(_lowerCAmelCase ) , vision_config.num_hidden_layers )
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        _lowerCAmelCase = to_atuple(vision_model.config.image_size )
        _lowerCAmelCase = to_atuple(vision_model.config.patch_size )
        _lowerCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        _lowerCAmelCase = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
        _lowerCAmelCase = output.text_model_output.attentions
        self.assertEqual(len(_lowerCAmelCase ) , text_config.num_hidden_layers )
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    # Asserts two arrays agree elementwise to within the given tolerance.
    def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[int]:
        _lowerCAmelCase = np.abs((a - b) ).max()
        self.assertLessEqual(_lowerCAmelCase , _lowerCAmelCase , f'''Difference between torch and flax is {diff} (>= {tol}).''' )
    # The test_* entry points below simply feed prepare_config_and_inputs()
    # into the corresponding check_* helper.
    def _snake_case ( self ) -> Tuple:
        _lowerCAmelCase = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**_lowerCAmelCase )
    def _snake_case ( self ) -> Optional[Any]:
        _lowerCAmelCase = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**_lowerCAmelCase )
    def _snake_case ( self ) -> Union[str, Any]:
        _lowerCAmelCase = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**_lowerCAmelCase )
    def _snake_case ( self ) -> List[Any]:
        _lowerCAmelCase = self.prepare_config_and_inputs()
        self.check_save_load(**_lowerCAmelCase )
    def _snake_case ( self ) -> List[Any]:
        _lowerCAmelCase = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**_lowerCAmelCase )
    # Slow test: round-trips a real pretrained checkpoint through disk and
    # verifies the outputs are unchanged.
    @slow
    def _snake_case ( self ) -> List[Any]:
        _lowerCAmelCase , _lowerCAmelCase = self.get_pretrained_model_and_inputs()
        _lowerCAmelCase = model_a(**_lowerCAmelCase )
        _lowerCAmelCase = outputs[0].numpy()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_a.save_pretrained(_lowerCAmelCase )
            _lowerCAmelCase = TFVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase )
            _lowerCAmelCase = model_a(**_lowerCAmelCase )
            _lowerCAmelCase = after_outputs[0].numpy()
            _lowerCAmelCase = np.amax(np.abs(out_a - out_a ) )
            self.assertLessEqual(_lowerCAmelCase , 1E-5 )
@require_tf
class lowerCAmelCase_ ( __magic_name__ ,unittest.TestCase ):
    """ViT + BERT instantiation of the dual-encoder test mixin.

    NOTE(review): the base name ``__magic_name__`` is undefined here (it
    presumably stood for the mixin class above), tuple unpacking targets the
    same name repeatedly (discarding all but the last element), and duplicate
    parameter names make some signatures a SyntaxError -- scrambling artifacts
    to be restored before running.
    """

    # Returns a tiny pretrained ViT/BERT dual encoder plus random inputs.
    def _snake_case ( self ) -> Union[str, Any]:
        _lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-random-bert" )
        _lowerCAmelCase = 13
        _lowerCAmelCase = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        _lowerCAmelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        _lowerCAmelCase = random_attention_mask([batch_size, 4] )
        _lowerCAmelCase = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    # Builds the two towers; the sub-model names must stay "vision_model"
    # and "text_model" so checkpoint weights map correctly.
    def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
        _lowerCAmelCase = TFViTModel(_lowerCAmelCase , name="vision_model" )
        _lowerCAmelCase = TFBertModel(_lowerCAmelCase , name="text_model" )
        return vision_model, text_model
    # Combines the ViT and BERT model testers' configs/inputs into the kwargs
    # dict consumed by the mixin's check_* helpers.
    def _snake_case ( self ) -> Optional[int]:
        _lowerCAmelCase = TFViTModelTester(self )
        _lowerCAmelCase = TFBertModelTester(self )
        _lowerCAmelCase = vit_model_tester.prepare_config_and_inputs()
        _lowerCAmelCase = bert_model_tester.prepare_config_and_inputs()
        _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = vision_config_and_inputs
        # NOTE(review): this 7-way unpack binds every element to the same
        # name -- the original bound text_config, input_ids, token_type_ids,
        # input_mask, sequence_labels, token_labels, choice_labels.
        (
            (
                _lowerCAmelCase
            ) , (
                _lowerCAmelCase
            ) , (
                _lowerCAmelCase
            ) , (
                _lowerCAmelCase
            ) , (
                _lowerCAmelCase
            ) , (
                _lowerCAmelCase
            ) , (
                _lowerCAmelCase
            ) ,
        ) = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class lowerCAmelCase_ ( __magic_name__ ,unittest.TestCase ):
    """DeiT + RoBERTa instantiation of the dual-encoder test mixin.

    Overrides the attention-shape check because DeiT prepends two special
    tokens ([CLS] and distillation) instead of one.

    NOTE(review): same scrambling artifacts as the sibling classes --
    undefined base ``__magic_name__``, duplicate parameter names
    (SyntaxError), and same-name tuple unpacking that discards values.
    """

    # Returns a tiny pretrained DeiT/RoBERTa dual encoder plus random inputs.
    def _snake_case ( self ) -> List[Any]:
        # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
        # just reinitialize it.
        _lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf" , "hf-internal-testing/tiny-random-roberta" )
        _lowerCAmelCase = 13
        _lowerCAmelCase = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        _lowerCAmelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        _lowerCAmelCase = random_attention_mask([batch_size, 4] )
        _lowerCAmelCase = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    # Attention-shape check specialised for DeiT's seq_len = num_patches + 2.
    def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , **_lowerCAmelCase ) -> int:
        _lowerCAmelCase , _lowerCAmelCase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
        _lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
        _lowerCAmelCase = model(
            input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase )
        _lowerCAmelCase = output.vision_model_output.attentions
        self.assertEqual(len(_lowerCAmelCase ) , vision_config.num_hidden_layers )
        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        _lowerCAmelCase = to_atuple(vision_model.config.image_size )
        _lowerCAmelCase = to_atuple(vision_model.config.patch_size )
        _lowerCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        _lowerCAmelCase = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
        _lowerCAmelCase = output.text_model_output.attentions
        self.assertEqual(len(_lowerCAmelCase ) , text_config.num_hidden_layers )
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    # Builds the two towers with the checkpoint-compatible sub-model names.
    def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
        _lowerCAmelCase = TFDeiTModel(_lowerCAmelCase , name="vision_model" )
        _lowerCAmelCase = TFRobertaModel(_lowerCAmelCase , name="text_model" )
        return vision_model, text_model
    # Combines the DeiT and RoBERTa testers' configs/inputs into kwargs.
    def _snake_case ( self ) -> List[str]:
        _lowerCAmelCase = TFDeiTModelTester(self )
        _lowerCAmelCase = TFRobertaModelTester(self )
        _lowerCAmelCase = vit_model_tester.prepare_config_and_inputs()
        _lowerCAmelCase = bert_model_tester.prepare_config_and_inputs()
        _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = vision_config_and_inputs
        # NOTE(review): 7-way unpack into one name -- discards all but the
        # last element; the distinct originals are used in the dict below.
        (
            (
                _lowerCAmelCase
            ) , (
                _lowerCAmelCase
            ) , (
                _lowerCAmelCase
            ) , (
                _lowerCAmelCase
            ) , (
                _lowerCAmelCase
            ) , (
                _lowerCAmelCase
            ) , (
                _lowerCAmelCase
            ) ,
        ) = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class lowerCAmelCase_ ( __magic_name__ ,unittest.TestCase ):
    """CLIP vision tower + BERT instantiation of the dual-encoder mixin.

    NOTE(review): same scrambling artifacts as the sibling classes --
    undefined base ``__magic_name__``, duplicate parameter names
    (SyntaxError), and same-name tuple unpacking that discards values.
    """

    # Returns a tiny pretrained CLIP/BERT dual encoder plus random inputs.
    def _snake_case ( self ) -> Any:
        _lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf" , "hf-internal-testing/tiny-random-bert" )
        _lowerCAmelCase = 13
        _lowerCAmelCase = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        _lowerCAmelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        _lowerCAmelCase = random_attention_mask([batch_size, 4] )
        _lowerCAmelCase = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    # Builds the two towers with the checkpoint-compatible sub-model names.
    def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> int:
        _lowerCAmelCase = TFCLIPVisionModel(_lowerCAmelCase , name="vision_model" )
        _lowerCAmelCase = TFBertModel(_lowerCAmelCase , name="text_model" )
        return vision_model, text_model
    # Combines the CLIP and BERT testers' configs/inputs into kwargs.
    def _snake_case ( self ) -> Union[str, Any]:
        _lowerCAmelCase = TFCLIPVisionModelTester(self )
        _lowerCAmelCase = TFBertModelTester(self )
        _lowerCAmelCase = clip_model_tester.prepare_config_and_inputs()
        _lowerCAmelCase = bert_model_tester.prepare_config_and_inputs()
        _lowerCAmelCase , _lowerCAmelCase = vision_config_and_inputs
        # NOTE(review): 7-way unpack into one name -- discards all but the
        # last element; the distinct originals are used in the dict below.
        (
            (
                _lowerCAmelCase
            ) , (
                _lowerCAmelCase
            ) , (
                _lowerCAmelCase
            ) , (
                _lowerCAmelCase
            ) , (
                _lowerCAmelCase
            ) , (
                _lowerCAmelCase
            ) , (
                _lowerCAmelCase
            ) ,
        ) = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_vision
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
    """Slow integration test against the real clip-italian checkpoint.

    NOTE(review): ``from_pt=_lowerCAmelCase`` below references the target of
    the very assignment it appears in -- the original argument was almost
    certainly ``from_pt=True``; likewise the later keyword values were
    distinct names before scrambling.  Verify against the upstream test.
    """

    @slow
    def _snake_case ( self ) -> Union[str, Any]:
        _lowerCAmelCase = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian" , logit_scale_init_value=1.0 , from_pt=_lowerCAmelCase )
        _lowerCAmelCase = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" )
        _lowerCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        _lowerCAmelCase = processor(
            text=["una foto di un gatto", "una foto di un cane"] , images=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="np" )
        _lowerCAmelCase = model(**_lowerCAmelCase )
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
        self.assertEqual(
            outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
        # Reference logits recorded from a known-good run of this checkpoint.
        _lowerCAmelCase = np.array([[1.2284727, 0.3104122]] )
        self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _lowerCAmelCase , atol=1E-3 ) )
# --- stray non-Python table row (was: "| 489 | 1 |"); commented out so the file stays parseable ---
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class snake_case__ ( metaclass=DummyObject ):
    """Import-error placeholder used when torch and scipy are not installed.

    ``DummyObject`` (imported above) raises a helpful ImportError on attribute
    access by reading the ``_backends`` class attribute; the constructors
    below do the same for direct instantiation.  Restored from the scrambled
    original, which named an undefined metaclass, duplicated the ``*_A`` /
    ``**_A`` parameter names (a SyntaxError), renamed ``_backends`` away, and
    gave both classmethods the same name so one shadowed the other.
    """

    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
# --- stray non-Python table row (was: "| 541 |"); commented out so the file stays parseable ---
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
# Lazy-import table: submodule name -> list of public symbols it provides.
# NOTE: the original assigned every list to the same module-level variable,
# clobbering this dict, and then referenced an undefined ``_import_structure``
# at the bottom; restored to the standard transformers lazy-module pattern.
_import_structure = {
    "configuration_audio_spectrogram_transformer": [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ASTConfig",
    ]
}

# The modeling file is only importable when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ASTForAudioClassification",
        "ASTModel",
        "ASTPreTrainedModel",
    ]

# The feature extractor additionally needs the speech extras.
try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ASTConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_audio_spectrogram_transformer import (
            AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ASTForAudioClassification,
            ASTModel,
            ASTPreTrainedModel,
        )

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor

else:
    import sys

    # At runtime the module is replaced by a lazy proxy that imports on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# --- stray non-Python table row (was: "| 541 | 1 |"); commented out so the file stays parseable ---
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import table: submodule name -> list of public symbols it provides.
# NOTE: the original assigned every list to the same module-level variable,
# clobbering this dict, and then referenced an undefined ``_import_structure``
# at the bottom; restored to the standard transformers lazy-module pattern.
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}

# Slow (sentencepiece-based) tokenizer.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]

# Fast (tokenizers-based) tokenizer.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]

# PyTorch models.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mbart"] = [
        "MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MBartForCausalLM",
        "MBartForConditionalGeneration",
        "MBartForQuestionAnswering",
        "MBartForSequenceClassification",
        "MBartModel",
        "MBartPreTrainedModel",
    ]

# TensorFlow models.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mbart"] = [
        "TFMBartForConditionalGeneration",
        "TFMBartModel",
        "TFMBartPreTrainedModel",
    ]

# Flax models.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mbart"] = [
        "FlaxMBartForConditionalGeneration",
        "FlaxMBartForQuestionAnswering",
        "FlaxMBartForSequenceClassification",
        "FlaxMBartModel",
        "FlaxMBartPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart import MBartTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart_fast import MBartTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mbart import (
            MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            MBartForCausalLM,
            MBartForConditionalGeneration,
            MBartForQuestionAnswering,
            MBartForSequenceClassification,
            MBartModel,
            MBartPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mbart import (
            FlaxMBartForConditionalGeneration,
            FlaxMBartForQuestionAnswering,
            FlaxMBartForSequenceClassification,
            FlaxMBartModel,
            FlaxMBartPreTrainedModel,
        )

else:
    import sys

    # At runtime the module is replaced by a lazy proxy that imports on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# --- stray non-Python table row (was: "| 713 |"); commented out so the file stays parseable ---
def a(snake_case__: dict):
    """Print a topological ordering of ``snake_case__`` using Kahn's algorithm.

    ``snake_case__`` is an adjacency list keyed by the integers ``0..n-1``.
    Prints ``Cycle exists`` instead when the graph has no topological order.
    Restored from the scrambled original, which collapsed every local
    (indegree / queue / topo / cnt / vertex) into a single name and appended
    the whole graph to the queue instead of the vertex index.
    """
    indegree = [0] * len(snake_case__)
    queue = []
    topo = []
    cnt = 0
    # Count incoming edges for every vertex.
    for values in snake_case__.values():
        for i in values:
            indegree[i] += 1
    # Seed the queue with vertices that have no prerequisites.
    for i in range(len(snake_case__)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        # Removing `vertex` may free up its successors.
        for x in snake_case__[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(snake_case__):
        print("Cycle exists")
    else:
        print(topo)
# Adjacency List of Graph
__a = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
# The sorter defined in this module is named ``a``; the scrambled original
# called an undefined ``topological_sort(graph)`` here.
a(__a)
# --- stray non-Python table row (was: "| 409 | 0 |"); commented out so the file stays parseable ---
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowercase : str = logging.get_logger(__name__)
__lowercase : Dict = {
'''facebook/convnextv2-tiny-1k-224''': '''https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json''',
}
class _A ( BackboneConfigMixin, PretrainedConfig ):
    """Configuration for the ConvNeXt V2 backbone.

    Restored from a mechanically scrambled original: the two distinct base
    classes (the original listed the same name twice, a TypeError), distinct
    ``__init__`` parameter names (the original repeated one identifier, a
    SyntaxError), the ``model_type`` attribute read by the auto classes, and
    the ``self._out_features`` / ``self._out_indices`` assignment (the
    original unpacked the result into a discarded local).
    """

    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        """Build the config; defaults correspond to convnextv2-tiny."""
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        # Stage widths / depths fall back to the tiny variant when not given.
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
# --- stray non-Python table row (was: "| 36 |"); commented out so the file stays parseable ---
'''simple docstring'''
# using dfs for finding eulerian path traversal
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    """Walk unvisited edges depth-first from ``u``, returning the edge path.

    ``visited_edge`` is a symmetric boolean matrix; both directions of an
    undirected edge are marked when it is traversed.  Restored from the
    scrambled original, whose four parameters all shared one name (a
    SyntaxError) and which recursed through an undefined ``dfs``.
    """
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


def check_circuit_or_path(graph, max_node):
    """Classify the graph by its number of odd-degree vertices.

    Returns ``(1, odd_node)`` for an Euler circuit (no odd vertices),
    ``(2, odd_node)`` for an Euler path (exactly two), and ``(3, odd_node)``
    otherwise; ``odd_node`` is the last odd-degree vertex seen (-1 if none).
    """
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    """Report whether ``graph`` has an Euler cycle/path and print one."""
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        # An Euler path must start at one of the two odd-degree vertices.
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)
def main():
    """Run ``check_euler`` over a handful of sample undirected graphs."""
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)


if __name__ == "__main__":
    main()
# --- stray non-Python table row (was: "| 601 | 0 |"); commented out so the file stays parseable ---
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCamelCase: int =logging.get_logger(__name__)
_UpperCamelCase: Union[str, Any] ={
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class __lowercase( PretrainedConfig ):
    """Configuration for the RoFormer model.

    Restored from a mechanically scrambled original: the base class
    (``PretrainedConfig`` is imported above; the original named an undefined
    ``SCREAMING_SNAKE_CASE``), the ``model_type`` attribute read by the auto
    classes, and the distinct ``__init__`` parameter names (the original
    repeated ``_lowerCAmelCase``, a SyntaxError).
    """

    model_type = "roformer"

    def __init__(
        self,
        vocab_size=5_0000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        """Build the config; defaults correspond to roformer_chinese_base."""
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        # Falls back to hidden_size when no separate embedding size is given.
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Whether rotary embeddings are also applied to the value projections.
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class __lowercase( OnnxConfig ):
    """ONNX export configuration for RoFormer.

    Restored from a mechanically scrambled original: the base class
    (``OnnxConfig`` is imported above; the original named an undefined
    ``SCREAMING_SNAKE_CASE``) and the property name ``inputs``, which the
    ONNX exporter reads (the original method name was scrambled away).
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Map each model input to its dynamic axes."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        # NOTE(review): this unconditional re-assignment makes the
        # multiple-choice branch above dead code; kept for parity with the
        # source, but confirm whether it is intentional upstream.
        dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
# --- stray non-Python table row (was: "| 716 |"); commented out so the file stays parseable ---
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    """Builds DetrImageProcessor kwargs and computes the output sizes the processor should produce.

    Fix: the obfuscated original destroyed every local/attribute name
    (``w``, ``h``, ``expected_values`` were referenced but never bound) and
    both helper methods shared one name, shadowing each other. Names are
    restored to what the sibling test class actually calls
    (``DetrImageProcessingTester(self)``, ``prepare_image_processor_dict``,
    ``get_expected_values``).
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Return the kwargs used to instantiate the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) DetrImageProcessor is expected to output."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                # channel-first arrays/tensors: (C, H, W)
                h, w = image.shape[1], image.shape[2]
            # shortest side is resized to size["shortest_edge"], aspect ratio preserved
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w)
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            # batched outputs are padded to the maximum height/width in the batch
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class __lowercase( ImageProcessingSavingTestMixin , unittest.TestCase ):
    """DetrImageProcessor tests: config round-trips, PIL/numpy/torch inputs, COCO detection/panoptic targets.

    Fix: every method shared one mangled name (so unittest would discover at
    most one of them), the mixin hook attribute and tester attribute were
    never assigned under the names the bodies read
    (``image_processing_class``, ``image_processor_tester``), and the base
    class name was undefined — ``ImageProcessingSavingTestMixin`` is what the
    file imports.
    """

    # mixin hook: the image processor class under test (None without vision deps)
    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'do_rescale'))
        self.assertTrue(hasattr(image_processing, 'rescale_factor'))
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_pad'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1333})
        self.assertEqual(image_processor.do_pad, True)

        # explicit kwargs override the serialized values
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False)
        self.assertEqual(image_processor.size, {'shortest_edge': 42, 'longest_edge': 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt', 'r') as f:
            target = json.loads(f.read())

        target = {'image_id': 39769, 'annotations': target}

        # encode them
        image_processing = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50')
        encoding = image_processing(images=image, annotations=target, return_tensors='pt')

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt', 'r') as f:
            target = json.loads(f.read())

        target = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}

        masks_path = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')

        # encode them
        image_processing = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic')
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors='pt')

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding['labels'][0]['masks'].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))
| 585 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
    """CLIPProcessor tests: save/load round-trips and tokenizer/image-processor delegation.

    Fix: the obfuscated original gave every method the same name (unittest
    would discover at most one test, and the ``self.get_tokenizer()`` /
    ``self.get_rust_tokenizer()`` / ``self.get_image_processor()`` /
    ``self.prepare_image_inputs()`` calls inside the bodies had no target),
    and local names were collapsed so later lines referenced unbound
    variables. Names restored to match the in-class call sites.
    """

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list with a single random channel-first uint8 image as a PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 79 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Fix: the original bound the logger and the archive map to the SAME mangled
# name, so the logger was immediately clobbered by the dict. Give each its
# conventional name.
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """huggingface/informer-tourism-monthly""": (
        """https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}
class UpperCAmelCase_ ( PretrainedConfig ):
    """Informer time-series transformer configuration.

    Fix: the original ``__init__`` gave every parameter one mangled name (a
    SyntaxError); names are restored from the body's assignment order. The
    ``model_type``/``attribute_map`` class attributes and the
    ``_number_of_features`` property (referenced by the body at feature-size
    computation time) also had their names destroyed.
    """

    model_type = 'informer'
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
        'num_hidden_layers': 'encoder_layers',
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    """The cardinality should be a list of the same length as `num_static_categorical_features`""" )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    """The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 79 | 1 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find a root of ``function`` (a sympy expression string) via Newton-Raphson.

    Fix: the original gave all five parameters the same mangled name (a
    SyntaxError) and the body referenced unbound locals; the demo script
    below calls ``newton_raphson`` with keywords ``variable``, ``precision``
    and this restored signature matches those call sites.

    Raises:
        ZeroDivisionError: if the derivative evaluates to exactly 0 at a guess.
    """
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point

    while True:
        if diff_function(prev_guess) != 0:
            # multiplicity > 1 accelerates convergence on repeated roots
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess )
        else:
            raise ZeroDivisionError("Could not find root" ) from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess
# Let's Execute
# NOTE(review): these demos call `newton_raphson`; the function definition
# above must carry (or alias) that name for this script to run — confirm.
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
    # Find root of polynomial
    # Find fourth Root of 5
    print(f'''The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}''')
    # Find value of e
    print(
        "The root of log(y) - 1 = 0 is ",
        f'''{newton_raphson("log(y) - 1", 2, variable="y")}''',
    )
    # Exponential Roots
    print(
        "The root of exp(x) - 1 = 0 is",
        f'''{newton_raphson("exp(x) - 1", 1_0, precision=0.0_0_5)}''',
    )
    # Find root of cos(x)
    print(f'''The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}''')
| 376 |
import math
import sys
def read_file_binary(file_path: str) -> str:
    """Read *file_path* and return its content as a string of bits ('0'/'1').

    Fix: the obfuscated original bound every value to one name and then
    referenced unbound locals (``result``, ``curr_byte``); also replaces the
    quadratic ``result += ...`` loop with a single ``"".join``. Exits the
    process if the file cannot be read, matching the original's behavior.
    """
    try:
        with open(file_path, "rb" ) as binary_file:
            data = binary_file.read()
        # each byte becomes its zero-padded 8-bit binary representation
        return "".join(f"{byte:08b}" for byte in data)
    except OSError:
        print("File not accessible" )
        sys.exit()
def decompress_data(data_bits: str) -> str:
    """Decompress *data_bits* using the Lempel-Ziv-Welch algorithm.

    Fix: the obfuscated original collapsed ``lexicon``/``result``/``index``/
    ``curr_string`` into a single repeatedly-clobbered name, so the lexicon
    was never extended; names restored so the dictionary-rebuilding logic
    works as intended.
    """
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        # when the index reaches a power of two, every key gains a leading
        # zero so new (longer) codes stay unambiguous
        if math.log2(index).is_integer():
            new_lexicon = {}
            for curr_key in list(lexicon):
                new_lexicon["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lexicon

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result
def write_file_binary(file_path: str, to_write: str) -> None:
    """Write bit-string *to_write* to *file_path*, 8 bits per byte.

    A trailing '1' followed by zero padding marks the end of the data; the
    final all-marker byte is dropped from the write (``[:-1]``), matching the
    original logic. Exits the process if the file cannot be opened.
    """
    byte_length = 8
    try:
        with open(file_path, "wb" ) as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1] ) % byte_length == 0:
                result_byte_array.append("10000000" )
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1] ) - 1
                )

            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2 ).to_bytes(1, byteorder="big" ) )
    except OSError:
        print("File not accessible" )
        sys.exit()
def remove_prefix(data_bits: str) -> str:
    """Strip the leading-zeros-then-'1' length prefix from *data_bits*.

    Fix: restored the ``counter``/``data_bits`` locals the obfuscated
    original had collapsed into a single name.
    """
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    # drop the run of leading zeros, then skip the same count plus the '1'
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress(source_path: str, destination_path: str) -> None:
    """Read *source_path*, LZW-decompress its bit stream, write to *destination_path*.

    Fix: renamed from the shared mangled name to ``compress``, which is the
    name the ``__main__`` guard below actually calls; parameters restored to
    distinct names.
    """
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
if __name__ == "__main__":
    # CLI usage: python <script> <source_file> <destination_file>
    # NOTE(review): requires a module-level `compress` function — confirm the
    # definition above carries that name.
    compress(sys.argv[1], sys.argv[2])
| 376 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import scaffolding for the PoolFormer sub-package.
# Fix: the obfuscated original assigned the structure dict and the
# conditional symbol lists to an unrelated name (`A_`) and then referenced
# the undefined `_import_structure` at the bottom — a NameError at import
# time. Restored the canonical lazy-module pattern.
_import_structure = {
    'configuration_poolformer': [
        'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'PoolFormerConfig',
        'PoolFormerOnnxConfig',
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_poolformer'] = ['PoolFormerFeatureExtractor']
    _import_structure['image_processing_poolformer'] = ['PoolFormerImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_poolformer'] = [
        'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'PoolFormerForImageClassification',
        'PoolFormerModel',
        'PoolFormerPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_poolformer import (
        POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PoolFormerConfig,
        PoolFormerOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_poolformer import PoolFormerFeatureExtractor
        from .image_processing_poolformer import PoolFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_poolformer import (
            POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PoolFormerForImageClassification,
            PoolFormerModel,
            PoolFormerPreTrainedModel,
        )

else:
    import sys

    # replace this module with a lazy proxy that imports symbols on first access
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 398 |
def pancake_sort(arr):
    """Sort *arr* ascending with the pancake-sort flip algorithm and return it.

    Fix: the obfuscated original named both the function and its parameter
    ``a`` while the body referenced the unbound name ``arr``, and the script
    below calls ``pancake_sort``. Restored the working implementation; the
    old name ``a`` is kept as an alias for backward compatibility.
    """
    cur = len(arr)
    while cur > 1:
        # Find the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur] ) )
        # Reverse from 0 to mi (flip the max to the front)
        arr = arr[mi::-1] + arr[mi + 1 : len(arr )]
        # Reverse the whole unsorted prefix (flip the max into place)
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr )]
        cur -= 1
    return arr


# backward-compatible alias for the original (broken) public name
a = pancake_sort
if __name__ == "__main__":
    # Read a comma-separated list of integers from stdin, sort, and print.
    # Fix: the obfuscated original assigned both values to one mangled name
    # while the following lines read `user_input` and `unsorted`.
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(pancake_sort(unsorted))
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
# Fix: every constant below was bound to the single mangled name `lowercase`,
# so each assignment clobbered the previous one, and the tokenizer class
# attributes reference the canonical names (VOCAB_FILES_NAMES, etc.).
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
        'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'xlnet-base-cased': None,
    'xlnet-large-cased': None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class _lowerCAmelCase ( UpperCamelCase_ ):
    """SentencePiece tokenizer for the XLNet checkpoints mapped above; pads on the left.

    NOTE(review): every instance method below is named ``__A`` (each new
    definition shadows the previous one) and several signatures repeat the
    parameter name ``SCREAMING_SNAKE_CASE``, which is a SyntaxError — the
    block appears mechanically renamed and cannot run as written.  Bodies
    also reference names (``mask_token``, ``vocab_file``, ...) that are
    never bound.  Documented as-is for review.
    """
    # Four class attributes collapsed onto one name: vocab file names,
    # pretrained vocab map, max model input sizes, and the padding side.
    lowerCAmelCase = VOCAB_FILES_NAMES
    lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
    lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCAmelCase = 'left'
    def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Tuple=False , SCREAMING_SNAKE_CASE : List[str]=True , SCREAMING_SNAKE_CASE : Dict=False , SCREAMING_SNAKE_CASE : Tuple="<s>" , SCREAMING_SNAKE_CASE : str="</s>" , SCREAMING_SNAKE_CASE : Optional[int]="<unk>" , SCREAMING_SNAKE_CASE : int="<sep>" , SCREAMING_SNAKE_CASE : List[str]="<pad>" , SCREAMING_SNAKE_CASE : Optional[int]="<cls>" , SCREAMING_SNAKE_CASE : Optional[int]="<mask>" , SCREAMING_SNAKE_CASE : int=["<eop>", "<eod>"] , SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE : Any , ) -> None:
        """Configure the special tokens and load the SentencePiece model file."""
        # A string mask token is wrapped in AddedToken with lstrip so it
        # absorbs the space before it.
        lowerCAmelCase = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token
        lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=SCREAMING_SNAKE_CASE , remove_space=SCREAMING_SNAKE_CASE , keep_accents=SCREAMING_SNAKE_CASE , bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , additional_special_tokens=SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE , )
        # presumably the number of special-token positions added — TODO confirm
        lowerCAmelCase = 3
        lowerCAmelCase = do_lower_case
        lowerCAmelCase = remove_space
        lowerCAmelCase = keep_accents
        lowerCAmelCase = vocab_file
        lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(SCREAMING_SNAKE_CASE )
    @property
    def __A ( self : Optional[int] ) -> Tuple:
        """Vocabulary size: the number of pieces in the SentencePiece model."""
        return len(self.sp_model )
    def __A ( self : Any ) -> str:
        """Build a token -> id mapping, including user-added tokens."""
        lowerCAmelCase = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self : Dict ) -> List[Any]:
        """Drop the unpicklable SentencePiece processor before pickling."""
        lowerCAmelCase = self.__dict__.copy()
        lowerCAmelCase = None
        return state
    def __setstate__( self : Optional[int] , SCREAMING_SNAKE_CASE : Dict ) -> Dict:
        """Restore pickled state and reload the SentencePiece model from disk."""
        lowerCAmelCase = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            lowerCAmelCase = {}
        lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[int] ) -> Dict:
        """Normalize raw text: collapse whitespace, unify quote characters,
        optionally strip accents (NFKD) and lower-case."""
        if self.remove_space:
            lowerCAmelCase = " ".join(inputs.strip().split() )
        else:
            lowerCAmelCase = inputs
        lowerCAmelCase = outputs.replace("``" , "\"" ).replace("''" , "\"" )
        if not self.keep_accents:
            lowerCAmelCase = unicodedata.normalize("NFKD" , SCREAMING_SNAKE_CASE )
            lowerCAmelCase = "".join([c for c in outputs if not unicodedata.combining(SCREAMING_SNAKE_CASE )] )
        if self.do_lower_case:
            lowerCAmelCase = outputs.lower()
        return outputs
    def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : str ) -> List[str]:
        """Tokenize text into SentencePiece pieces; a piece that ends in a
        comma preceded by a digit is re-encoded so the comma becomes its own
        piece (mirrors the numeric-splitting behavior of the vocab)."""
        lowerCAmelCase = self.preprocess_text(SCREAMING_SNAKE_CASE )
        lowerCAmelCase = self.sp_model.encode(SCREAMING_SNAKE_CASE , out_type=SCREAMING_SNAKE_CASE )
        lowerCAmelCase = []
        for piece in pieces:
            if len(SCREAMING_SNAKE_CASE ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
                lowerCAmelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(SCREAMING_SNAKE_CASE , "" ) )
                # Fix up the leading SPIECE_UNDERLINE after the re-encode above.
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        lowerCAmelCase = cur_pieces[1:]
                    else:
                        lowerCAmelCase = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(SCREAMING_SNAKE_CASE )
            else:
                new_pieces.append(SCREAMING_SNAKE_CASE )
        return new_pieces
    def __A ( self : Any , SCREAMING_SNAKE_CASE : int ) -> Union[str, Any]:
        """Convert a token (piece string) to its integer id."""
        return self.sp_model.PieceToId(SCREAMING_SNAKE_CASE )
    def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE : Any ) -> Union[str, Any]:
        """Convert an integer id back to its token (piece string)."""
        return self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE )
    def __A ( self : Tuple , SCREAMING_SNAKE_CASE : List[str] ) -> Union[str, Any]:
        """Join pieces back into a single string.

        NOTE(review): the joined string is `.replace`d with itself here —
        presumably ``SPIECE_UNDERLINE`` was intended as the first argument.
        """
        lowerCAmelCase = "".join(SCREAMING_SNAKE_CASE ).replace(SCREAMING_SNAKE_CASE , " " ).strip()
        return out_string
    def __A ( self : str , SCREAMING_SNAKE_CASE : List[int] , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : bool = None , SCREAMING_SNAKE_CASE : bool = True , **SCREAMING_SNAKE_CASE : str , ) -> str:
        """Decode a sequence of ids to text, keeping added tokens as separate
        sub-strings and optionally cleaning up tokenization spaces."""
        lowerCAmelCase = kwargs.pop("use_source_tokenizer" , SCREAMING_SNAKE_CASE )
        lowerCAmelCase = self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE )
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        lowerCAmelCase = []
        lowerCAmelCase = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(SCREAMING_SNAKE_CASE ) )
                    lowerCAmelCase = []
                sub_texts.append(SCREAMING_SNAKE_CASE )
            else:
                current_sub_text.append(SCREAMING_SNAKE_CASE )
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(SCREAMING_SNAKE_CASE ) )
        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        lowerCAmelCase = "".join(SCREAMING_SNAKE_CASE )
        lowerCAmelCase = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            lowerCAmelCase = self.clean_up_tokenization(SCREAMING_SNAKE_CASE )
            return clean_text
        else:
            return text
    def __A ( self : Dict , SCREAMING_SNAKE_CASE : List[int] , SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
        """Add special tokens: ``A <sep> <cls>`` for a single sequence or
        ``A <sep> B <sep> <cls>`` for a pair (classification token at the end)."""
        lowerCAmelCase = [self.sep_token_id]
        lowerCAmelCase = [self.cls_token_id]
        if token_ids_a is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_a + sep + cls
    def __A ( self : List[Any] , SCREAMING_SNAKE_CASE : List[int] , SCREAMING_SNAKE_CASE : Optional[List[int]] = None , SCREAMING_SNAKE_CASE : bool = False ) -> List[int]:
        """Mask marking special-token positions (1) vs sequence tokens (0)."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=SCREAMING_SNAKE_CASE , token_ids_a=SCREAMING_SNAKE_CASE , already_has_special_tokens=SCREAMING_SNAKE_CASE )
        if token_ids_a is not None:
            return ([0] * len(SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1, 1]
        return ([0] * len(SCREAMING_SNAKE_CASE )) + [1, 1]
    def __A ( self : int , SCREAMING_SNAKE_CASE : List[int] , SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
        """Token-type ids: segment 0 for sequence A, 1 for sequence B, and
        segment 2 for the trailing classification token."""
        lowerCAmelCase = [self.sep_token_id]
        lowerCAmelCase = [2]
        if token_ids_a is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
    def __A ( self : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]:
        """Write the SentencePiece model into ``save_directory``: copy the
        original vocab file when present, otherwise serialize the loaded model."""
        if not os.path.isdir(SCREAMING_SNAKE_CASE ):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
            return
        lowerCAmelCase = os.path.join(
            SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , SCREAMING_SNAKE_CASE )
        elif not os.path.isfile(self.vocab_file ):
            with open(SCREAMING_SNAKE_CASE , "wb" ) as fi:
                lowerCAmelCase = self.sp_model.serialized_model_proto()
                fi.write(SCREAMING_SNAKE_CASE )
        return (out_vocab_file,)
# --- fragment separator (extraction residue): | 159 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
lowercase : Optional[int] = random.Random()
def __a ( A__ , A__=1.0 , A__=None , A__=None ) -> Any:
if rng is None:
lowerCAmelCase = global_rng
lowerCAmelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
    """Helper that holds feature-extractor hyper-parameters and builds test inputs.

    NOTE(review): several signatures below repeat the parameter name
    ``SCREAMING_SNAKE_CASE`` (a SyntaxError) and bodies reference names
    (``parent``, ``batch_size``, ...) that are never bound — this block
    appears mechanically renamed and cannot run as written.
    """
    def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : int=7 , SCREAMING_SNAKE_CASE : Optional[Any]=4_0_0 , SCREAMING_SNAKE_CASE : Optional[Any]=2_0_0_0 , SCREAMING_SNAKE_CASE : Union[str, Any]=1 , SCREAMING_SNAKE_CASE : int=0.0 , SCREAMING_SNAKE_CASE : Optional[int]=1_6_0_0_0 , SCREAMING_SNAKE_CASE : Any=True , SCREAMING_SNAKE_CASE : Optional[Any]=8_0 , SCREAMING_SNAKE_CASE : int=1_6 , SCREAMING_SNAKE_CASE : Any=6_4 , SCREAMING_SNAKE_CASE : List[Any]="hann_window" , SCREAMING_SNAKE_CASE : Dict=8_0 , SCREAMING_SNAKE_CASE : Any=7_6_0_0 , SCREAMING_SNAKE_CASE : Optional[Any]=1E-10 , SCREAMING_SNAKE_CASE : Union[str, Any]=True , ) -> Any:
        """Store the feature-extractor hyper-parameters used by the tests."""
        lowerCAmelCase = parent
        lowerCAmelCase = batch_size
        lowerCAmelCase = min_seq_length
        lowerCAmelCase = max_seq_length
        # Step between consecutive input lengths so batch items differ in size.
        lowerCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        lowerCAmelCase = feature_size
        lowerCAmelCase = padding_value
        lowerCAmelCase = sampling_rate
        lowerCAmelCase = do_normalize
        lowerCAmelCase = num_mel_bins
        lowerCAmelCase = hop_length
        lowerCAmelCase = win_length
        lowerCAmelCase = win_function
        lowerCAmelCase = fmin
        lowerCAmelCase = fmax
        lowerCAmelCase = mel_floor
        lowerCAmelCase = return_attention_mask
    def __A ( self : Union[str, Any] ) -> List[str]:
        """Return the kwargs dict used to construct a SpeechTaFeatureExtractor."""
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "do_normalize": self.do_normalize,
            "num_mel_bins": self.num_mel_bins,
            "hop_length": self.hop_length,
            "win_length": self.win_length,
            "win_function": self.win_function,
            "fmin": self.fmin,
            "fmax": self.fmax,
            "mel_floor": self.mel_floor,
            "return_attention_mask": self.return_attention_mask,
        }
    def __A ( self : List[str] , SCREAMING_SNAKE_CASE : int=False , SCREAMING_SNAKE_CASE : Optional[Any]=False ) -> str:
        """Create waveform-style inputs, optionally equal-length and/or numpy arrays."""
        def _flatten(SCREAMING_SNAKE_CASE : List[Any] ):
            return list(itertools.chain(*SCREAMING_SNAKE_CASE ) )
        if equal_length:
            lowerCAmelCase = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            lowerCAmelCase = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            lowerCAmelCase = [np.asarray(SCREAMING_SNAKE_CASE ) for x in speech_inputs]
        return speech_inputs
    def __A ( self : List[str] , SCREAMING_SNAKE_CASE : Tuple=False , SCREAMING_SNAKE_CASE : Optional[int]=False ) -> str:
        """Create spectrogram-style target inputs, ``num_mel_bins`` wide."""
        if equal_length:
            lowerCAmelCase = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            lowerCAmelCase = [
                floats_list((x, self.num_mel_bins) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            lowerCAmelCase = [np.asarray(SCREAMING_SNAKE_CASE ) for x in speech_inputs]
        return speech_inputs
@require_torch
class _lowerCAmelCase ( UpperCamelCase_ , unittest.TestCase ):
    """Test suite for SpeechTaFeatureExtractor (waveform inputs and mel targets).

    NOTE(review): every test method is named ``__A`` (later definitions
    shadow earlier ones), so only the last would be collected; the block
    appears mechanically renamed.  Bodies also reference names
    (``feat_extract``, ``speech_inputs``, ...) that are never bound.
    """
    # Feature-extractor class exercised by the shared mixin.
    lowerCAmelCase = SpeechTaFeatureExtractor
    def __A ( self : Optional[int] ) -> Dict:
        """Create the shared tester helper (setUp-style)."""
        lowerCAmelCase = SpeechTaFeatureExtractionTester(self )
    def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[str]:
        """Assert features are (approximately) zero-mean, unit-variance."""
        self.assertTrue(np.all(np.mean(SCREAMING_SNAKE_CASE , axis=0 ) < 1E-3 ) )
        self.assertTrue(np.all(np.abs(np.var(SCREAMING_SNAKE_CASE , axis=0 ) - 1 ) < 1E-3 ) )
    def __A ( self : int ) -> Union[str, Any]:
        """Python lists and numpy arrays should produce identical features."""
        lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        lowerCAmelCase = [np.asarray(SCREAMING_SNAKE_CASE ) for speech_input in speech_inputs]
        # Test not batched input
        lowerCAmelCase = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
        lowerCAmelCase = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
        self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-3 ) )
        # Test batched
        lowerCAmelCase = feat_extract(SCREAMING_SNAKE_CASE , return_tensors="np" ).input_values
        lowerCAmelCase = feat_extract(SCREAMING_SNAKE_CASE , return_tensors="np" ).input_values
        for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
            self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-3 ) )
    def __A ( self : Any ) -> Union[str, Any]:
        """Normalization should hold for each padding strategy; padded tail stays ~0."""
        lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        lowerCAmelCase = ["longest", "max_length", "do_not_pad"]
        lowerCAmelCase = [None, 1_6_0_0, None]
        for max_length, padding in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
            lowerCAmelCase = feat_extract(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , return_tensors="np" )
            lowerCAmelCase = processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
            self.assertTrue(input_values[0][8_0_0:].sum() < 1E-6 )
            self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
            self.assertTrue(input_values[0][1_0_0_0:].sum() < 1E-6 )
            self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
    def __A ( self : Union[str, Any] ) -> Optional[int]:
        """Normalization should hold for varying input lengths."""
        lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        lowerCAmelCase = range(8_0_0 , 1_4_0_0 , 2_0_0 )
        lowerCAmelCase = [floats_list((1, x) )[0] for x in lengths]
        lowerCAmelCase = ["longest", "max_length", "do_not_pad"]
        lowerCAmelCase = [None, 1_6_0_0, None]
        for max_length, padding in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
            lowerCAmelCase = feat_extract(SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE )
            lowerCAmelCase = processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
            self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
            self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
    def __A ( self : str ) -> Any:
        """Truncation to ``max_length`` with ``padding="max_length"``."""
        lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        lowerCAmelCase = feat_extract(
            SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , max_length=1_0_0_0 , padding="max_length" , return_tensors="np" )
        lowerCAmelCase = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
        self._check_zero_mean_unit_variance(input_values[1] )
        self._check_zero_mean_unit_variance(input_values[2] )
    def __A ( self : int ) -> Optional[int]:
        """``padding="longest"`` pads to max(len, max_length ceiling) as expected."""
        lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        lowerCAmelCase = feat_extract(
            SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , max_length=1_0_0_0 , padding="longest" , return_tensors="np" )
        lowerCAmelCase = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
        self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
        self._check_zero_mean_unit_variance(input_values[2] )
        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1_0_0_0) )
        lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        lowerCAmelCase = feat_extract(
            SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , max_length=2_0_0_0 , padding="longest" , return_tensors="np" )
        lowerCAmelCase = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
        self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
        self._check_zero_mean_unit_variance(input_values[2] )
        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1_2_0_0) )
    def __A ( self : Optional[int] ) -> Any:
        """float64 input should come out float32 (np) / float32 (pt) after padding."""
        lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        lowerCAmelCase = np.random.rand(1_0_0 ).astype(np.floataa )
        lowerCAmelCase = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            lowerCAmelCase = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
            self.assertTrue(np_processed.input_values.dtype == np.floataa )
            lowerCAmelCase = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
            self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
    def __A ( self : int ) -> Optional[int]:
        """Target (spectrogram) path: shape, list/array equivalence, 2-D batching."""
        lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        lowerCAmelCase = [np.asarray(SCREAMING_SNAKE_CASE ) for speech_input in speech_inputs]
        # Test feature size
        lowerCAmelCase = feature_extractor(audio_target=SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , return_tensors="np" ).input_values
        self.assertTrue(input_values.ndim == 3 )
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
        # Test not batched input
        lowerCAmelCase = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_values
        lowerCAmelCase = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_values
        self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-3 ) )
        # Test batched
        lowerCAmelCase = feature_extractor(SCREAMING_SNAKE_CASE , return_tensors="np" ).input_values
        lowerCAmelCase = feature_extractor(SCREAMING_SNAKE_CASE , return_tensors="np" ).input_values
        for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
            self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-3 ) )
        # Test 2-D numpy arrays are batched.
        lowerCAmelCase = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
        lowerCAmelCase = np.asarray(SCREAMING_SNAKE_CASE )
        lowerCAmelCase = feature_extractor(SCREAMING_SNAKE_CASE , return_tensors="np" ).input_values
        lowerCAmelCase = feature_extractor(SCREAMING_SNAKE_CASE , return_tensors="np" ).input_values
        for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
            self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-3 ) )
    def __A ( self : Dict ) -> Optional[Any]:
        """BatchFeature wrapping targets converts to numpy with expected shape."""
        lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
        lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
        lowerCAmelCase = feat_extract.model_input_names[0]
        lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
        self.assertTrue(all(len(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE ) for x, y in zip(SCREAMING_SNAKE_CASE , processed_features[input_name] ) ) )
        lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_target(equal_length=SCREAMING_SNAKE_CASE )
        lowerCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type="np" )
        lowerCAmelCase = processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            lowerCAmelCase = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
    @require_torch
    def __A ( self : List[str] ) -> Optional[int]:
        """Same as above but converting to torch tensors."""
        lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_target(equal_length=SCREAMING_SNAKE_CASE )
        lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
        lowerCAmelCase = feat_extract.model_input_names[0]
        lowerCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type="pt" )
        lowerCAmelCase = processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            lowerCAmelCase = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
    @require_torch
    def __A ( self : Optional[int] ) -> Dict:
        """`pad` should agree (within tolerance) between np and pt tensor types."""
        lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
        lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
        lowerCAmelCase = feat_extract.model_input_names[0]
        lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
        lowerCAmelCase = feat_extract.num_mel_bins # hack!
        lowerCAmelCase = feat_extract.pad(SCREAMING_SNAKE_CASE , padding="longest" , return_tensors="np" )[input_name]
        lowerCAmelCase = feat_extract.pad(SCREAMING_SNAKE_CASE , padding="longest" , return_tensors="pt" )[input_name]
        self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
    def __A ( self : Optional[Any] ) -> Union[str, Any]:
        """Attention mask is returned with the right shape and row sums."""
        lowerCAmelCase = self.feat_extract_dict
        lowerCAmelCase = True
        lowerCAmelCase = self.feature_extraction_class(**SCREAMING_SNAKE_CASE )
        lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
        lowerCAmelCase = [len(SCREAMING_SNAKE_CASE ) for x in speech_inputs]
        lowerCAmelCase = feat_extract.model_input_names[0]
        lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
        lowerCAmelCase = feat_extract.num_mel_bins # hack!
        lowerCAmelCase = feat_extract.pad(SCREAMING_SNAKE_CASE , padding="longest" , return_tensors="np" )
        self.assertIn("attention_mask" , SCREAMING_SNAKE_CASE )
        self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
        self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , SCREAMING_SNAKE_CASE )
    def __A ( self : Tuple ) -> List[Any]:
        """Attention mask with truncation to the shortest input length."""
        lowerCAmelCase = self.feat_extract_dict
        lowerCAmelCase = True
        lowerCAmelCase = self.feature_extraction_class(**SCREAMING_SNAKE_CASE )
        lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
        lowerCAmelCase = [len(SCREAMING_SNAKE_CASE ) for x in speech_inputs]
        lowerCAmelCase = feat_extract.model_input_names[0]
        lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
        lowerCAmelCase = min(SCREAMING_SNAKE_CASE )
        lowerCAmelCase = feat_extract.num_mel_bins # hack!
        lowerCAmelCase = feat_extract.pad(
            SCREAMING_SNAKE_CASE , padding="max_length" , max_length=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , return_tensors="np" )
        self.assertIn("attention_mask" , SCREAMING_SNAKE_CASE )
        self.assertListEqual(
            list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
    def __A ( self : Tuple , SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[Any]:
        """Load `num_samples` librispeech dummy audio arrays for integration tests."""
        from datasets import load_dataset
        lowerCAmelCase = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
        # automatic decoding with librispeech
        lowerCAmelCase = ds.sort("id" ).select(range(SCREAMING_SNAKE_CASE ) )[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def __A ( self : List[Any] ) -> int:
        """Integration: waveform path matches a stored slice of expected values."""
        lowerCAmelCase = torch.tensor(
            [2.38_04E-03, 2.07_52E-03, 1.98_36E-03, 2.10_57E-03, 1.61_74E-03,
             3.05_18E-04, 9.15_53E-05, 3.35_69E-04, 9.76_56E-04, 1.83_11E-03,
             2.01_42E-03, 2.10_57E-03, 1.73_95E-03, 4.57_76E-04, -3.96_73E-04,
             4.57_76E-04, 1.00_71E-03, 9.15_53E-05, 4.88_28E-04, 1.15_97E-03,
             7.32_42E-04, 9.46_04E-04, 1.80_05E-03, 1.83_11E-03, 8.85_01E-04,
             4.27_25E-04, 4.88_28E-04, 7.32_42E-04, 1.09_86E-03, 2.10_57E-03] )
        # fmt: on
        lowerCAmelCase = self._load_datasamples(1 )
        lowerCAmelCase = SpeechTaFeatureExtractor()
        lowerCAmelCase = feature_extractor(SCREAMING_SNAKE_CASE , return_tensors="pt" ).input_values
        self.assertEquals(input_values.shape , (1, 9_3_6_8_0) )
        self.assertTrue(torch.allclose(input_values[0, :3_0] , SCREAMING_SNAKE_CASE , atol=1E-6 ) )
    def __A ( self : Optional[Any] ) -> Optional[int]:
        """Integration: mel-target path matches a stored slice of expected values."""
        lowerCAmelCase = torch.tensor(
            [-2.6_8_7_0, -3.0_1_0_4, -3.1_3_5_6, -3.5_3_5_2, -3.0_0_4_4, -3.0_3_5_3, -3.4_7_1_9, -3.6_7_7_7,
             -3.1_5_2_0, -2.9_4_3_5, -2.6_5_5_3, -2.8_7_9_5, -2.9_9_4_4, -2.5_9_2_1, -3.0_2_7_9, -3.0_3_8_6,
             -3.0_8_6_4, -3.1_2_9_1, -3.2_3_5_3, -2.7_4_4_4, -2.6_8_3_1, -2.7_2_8_7, -3.1_7_6_1, -3.1_5_7_1,
             -3.2_7_2_6, -3.0_5_8_2, -3.1_0_0_7, -3.4_5_3_3, -3.4_6_9_5, -3.0_9_9_8] )
        # fmt: on
        lowerCAmelCase = self._load_datasamples(1 )
        lowerCAmelCase = SpeechTaFeatureExtractor()
        lowerCAmelCase = feature_extractor(audio_target=SCREAMING_SNAKE_CASE , return_tensors="pt" ).input_values
        self.assertEquals(input_values.shape , (1, 3_6_6, 8_0) )
        self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , SCREAMING_SNAKE_CASE , atol=1E-4 ) )
# --- fragment separator (extraction residue): | 159 | 1 |
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
# Module logger plus a lazily-built map from device string ids to jax devices.
# NOTE(review): both globals share the name `__UpperCamelCase`, so the logger
# is immediately overwritten by None — the file looks mechanically renamed.
__UpperCamelCase = get_logger()
# Populated on first use; `jaxlib` Device objects are not picklable, so the
# mapping lives at module scope instead of on formatter instances.
__UpperCamelCase = None
class lowerCAmelCase ( TensorFormatter[Mapping, """jax.Array""", Mapping] ):
    """Formatter that converts Arrow rows/columns/batches into jax arrays.

    NOTE(review): ``__init__`` repeats the parameter name ``lowerCAmelCase__``
    (a SyntaxError), and several assignments below bind a throwaway local
    (``SCREAMING_SNAKE_CASE``) where ``self.device`` / ``self.jnp_array_kwargs``
    were presumably intended — the block looks mechanically renamed and
    cannot run as written.
    """
    def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> Dict:
        """Resolve the target jax device (by string id) and store jnp kwargs."""
        super().__init__(features=lowerCAmelCase__ )
        import jax
        from jaxlib.xla_client import Device
        if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
            # Device objects are rejected because they cannot be serialized;
            # callers must pass the string identifier instead.
            raise ValueError(
                F'Expected {device} to be a `str` not {type(lowerCAmelCase__ )}, as `jaxlib.xla_extension.Device` '
                'is not serializable neither with `pickle` nor with `dill`. Instead you can surround '
                'the device with `str()` to get its string identifier that will be internally mapped '
                'to the actual `jaxlib.xla_extension.Device`.' )
        lowerCAmelCase = device if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else str(jax.devices()[0] )
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            lowerCAmelCase = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys() ):
            # Unknown device id: warn and fall back to the default jax device.
            logger.warning(
                F'Device with string identifier {self.device} not listed among the available '
                F'devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '
                F'device: {str(jax.devices()[0] )}.' )
            lowerCAmelCase = str(jax.devices()[0] )
        lowerCAmelCase = jnp_array_kwargs
    @staticmethod
    def __A ( ) -> Dict[str, "jaxlib.xla_extension.Device"]:
        """Map each available jax device's string id to the device object."""
        import jax
        return {str(lowerCAmelCase__ ): device for device in jax.devices()}
    def __A ( self , lowerCAmelCase__ ) -> Any:
        """Stack a column of same-shape/dtype jax arrays into one array."""
        import jax
        import jax.numpy as jnp
        if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and column:
            if all(
                isinstance(lowerCAmelCase__ , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
                return jnp.stack(lowerCAmelCase__ , axis=0 )
        return column
    def __A ( self , lowerCAmelCase__ ) -> Optional[Any]:
        """Convert a scalar/array value to a jnp array on the configured device."""
        import jax
        import jax.numpy as jnp
        if isinstance(lowerCAmelCase__ , (str, bytes, type(lowerCAmelCase__ )) ):
            return value
        elif isinstance(lowerCAmelCase__ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
            return value.tolist()
        lowerCAmelCase = {}
        if isinstance(lowerCAmelCase__ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_xaa:
                lowerCAmelCase = {'dtype': jnp.intaa}
            else:
                lowerCAmelCase = {'dtype': jnp.intaa}
        elif isinstance(lowerCAmelCase__ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            lowerCAmelCase = {'dtype': jnp.floataa}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image
            if isinstance(lowerCAmelCase__ , PIL.Image.Image ):
                lowerCAmelCase = np.asarray(lowerCAmelCase__ )
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            lowerCAmelCase = self._map_devices_to_str()
        with jax.default_device(DEVICE_MAPPING[self.device] ):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(lowerCAmelCase__ , **{**default_dtype, **self.jnp_array_kwargs} )
    def __A ( self , lowerCAmelCase__ ) -> Any:
        """Recursively tensorize one element, handling torch tensors and nesting."""
        import jax
        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch
            if isinstance(lowerCAmelCase__ , torch.Tensor ):
                return self._tensorize(data_struct.detach().cpu().numpy()[()] )
        if hasattr(lowerCAmelCase__ , '__array__' ) and not isinstance(lowerCAmelCase__ , jax.Array ):
            lowerCAmelCase = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(lowerCAmelCase__ , np.ndarray ):
            if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(lowerCAmelCase__ ) for substruct in data_struct] )
        elif isinstance(lowerCAmelCase__ , (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(lowerCAmelCase__ ) for substruct in data_struct] )
        return self._tensorize(lowerCAmelCase__ )
    def __A ( self , lowerCAmelCase__ ) -> Union[str, Any]:
        """Apply recursive tensorization over an arbitrarily nested structure."""
        return map_nested(self._recursive_tensorize , lowerCAmelCase__ , map_list=lowerCAmelCase__ )
    def __A ( self , lowerCAmelCase__ ) -> Mapping:
        """Format one Arrow row as a mapping of jax values."""
        lowerCAmelCase = self.numpy_arrow_extractor().extract_row(lowerCAmelCase__ )
        lowerCAmelCase = self.python_features_decoder.decode_row(lowerCAmelCase__ )
        return self.recursive_tensorize(lowerCAmelCase__ )
    def __A ( self , lowerCAmelCase__ ) -> "jax.Array":
        """Format the first Arrow column as a (possibly stacked) jax array."""
        lowerCAmelCase = self.numpy_arrow_extractor().extract_column(lowerCAmelCase__ )
        lowerCAmelCase = self.python_features_decoder.decode_column(lowerCAmelCase__ , pa_table.column_names[0] )
        lowerCAmelCase = self.recursive_tensorize(lowerCAmelCase__ )
        lowerCAmelCase = self._consolidate(lowerCAmelCase__ )
        return column
    def __A ( self , lowerCAmelCase__ ) -> Mapping:
        """Format an Arrow batch, consolidating each column into jax arrays."""
        lowerCAmelCase = self.numpy_arrow_extractor().extract_batch(lowerCAmelCase__ )
        lowerCAmelCase = self.python_features_decoder.decode_batch(lowerCAmelCase__ )
        lowerCAmelCase = self.recursive_tensorize(lowerCAmelCase__ )
        for column_name in batch:
            lowerCAmelCase = self._consolidate(batch[column_name] )
        return batch
# --- fragment separator (extraction residue): | 247 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class lowerCAmelCase ( lowerCamelCase_ ):
    """Output container for the Roberta-series text encoder defined below.

    NOTE(review): all four fields share the name ``SCREAMING_SNAKE_CASE_``,
    so only the last declaration survives as a dataclass field — the block
    appears mechanically renamed from four distinct output fields
    (presumably projection state, last hidden state, hidden states,
    attentions — TODO confirm against the original).
    """
    # optional float tensor (first declared field — shadowed below)
    SCREAMING_SNAKE_CASE_ : Optional[torch.FloatTensor] = None
    # required-style float tensor defaulting to None (shadowed below)
    SCREAMING_SNAKE_CASE_ : torch.FloatTensor = None
    # optional tuple of float tensors (shadowed below)
    SCREAMING_SNAKE_CASE_ : Optional[Tuple[torch.FloatTensor]] = None
    # optional tuple of float tensors (the only surviving field)
    SCREAMING_SNAKE_CASE_ : Optional[Tuple[torch.FloatTensor]] = None
class lowerCAmelCase ( lowerCamelCase_ ):
    """Configuration for the Roberta-series text encoder.

    Adds projection/pooling options on top of the base XLM-Roberta config.
    Fix: the original ``__init__`` repeated a single obfuscated parameter name
    eight times (a SyntaxError); the upstream parameter list is restored.
    """

    def __init__( self , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , project_dim=512 , pooler_fn="cls" , learn_encoder=False , use_attention_mask=True , **kwargs , ) -> Tuple:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        # Output size of the projection head.
        self.project_dim = project_dim
        # Pooling strategy name (e.g. "cls").
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class lowerCAmelCase ( lowerCamelCase_ ):
    """XLM-Roberta backbone with a linear projection head.

    Fixes applied to the obfuscated original:
    - ``forward`` repeated one parameter name eleven times (a SyntaxError);
      the upstream parameter names are restored.
    - ``__init__`` assigned every submodule to a local variable instead of an
      attribute of ``self``, so nn.Module never registered any parameters.
    - ``config`` was referenced but never defined inside ``__init__``.
    """

    # NOTE(review): upstream these four class attributes are
    # _keys_to_ignore_on_load_unexpected, _keys_to_ignore_on_load_missing,
    # base_model_prefix and config_class; the obfuscated names collide, so
    # only the last assignment survives — confirm before depending on them.
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = [R"""pooler""", R"""logit_scale"""]
    SCREAMING_SNAKE_CASE_ : List[Any] = [R"""position_ids""", R"""predictions.decoder.bias"""]
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = """roberta"""
    SCREAMING_SNAKE_CASE_ : Dict = RobertaSeriesConfig

    def __init__( self , lowerCAmelCase__ ) -> Union[str, Any]:
        super().__init__(lowerCAmelCase__ )
        config = lowerCAmelCase__
        # Register submodules on ``self`` so their parameters are tracked.
        self.roberta = XLMRobertaModel(config )
        self.transformation = nn.Linear(config.hidden_size , config.project_dim )
        self.has_pre_transformation = getattr(config , 'has_pre_transformation' , False )
        if self.has_pre_transformation:
            # Extra head that projects the penultimate layer after a LayerNorm.
            self.transformation_pre = nn.Linear(config.hidden_size , config.project_dim )
            self.pre_LN = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
        self.post_init()

    def __A ( self , input_ids = None , attention_mask = None , token_type_ids = None , position_ids = None , head_mask = None , inputs_embeds = None , encoder_hidden_states = None , encoder_attention_mask = None , output_attentions = None , output_hidden_states = None , return_dict = None , ) -> Union[str, Any]:
        """Run the backbone and return its projected hidden state."""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.roberta(
            input_ids=input_ids , attention_mask=attention_mask , token_type_ids=token_type_ids , position_ids=position_ids , head_mask=head_mask , inputs_embeds=inputs_embeds , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_attentions=output_attentions , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=return_dict , )
        if self.has_pre_transformation:
            # Use the penultimate layer's hidden state, normalized, as input
            # to the pre-transformation projection head.
            sequence_output = outputs['hidden_states'][-2]
            sequence_output = self.pre_LN(sequence_output )
            projection_state = self.transformation_pre(sequence_output )
        else:
            projection_state = self.transformation(outputs.last_hidden_state )
        return TransformationModelOutput(
            projection_state=projection_state , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 247 | 1 |
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def lowerCAmelCase ( monkeypatch ) -> None:
    """Reset the datasets deprecation-warning registry before a test.

    Fix: pytest injects fixtures by parameter name, so the argument must be
    called ``monkeypatch`` — the original named it ``_lowerCAmelCase`` and
    then referenced the undefined name ``monkeypatch`` (NameError).
    """
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() )
@pytest.fixture
def lowerCAmelCase ( monkeypatch ) -> None:
    """Replace ``datasets.inspect.huggingface_hub`` with a minimal stub hub.

    Fixes: the parameter must be named ``monkeypatch`` for pytest to inject
    the built-in fixture; the inner classes referenced undefined names
    (``metric_id``, ``MetricMock``, ``_SCREAMING_SNAKE_CASE``) and collided
    with each other.
    """

    class MetricMock:
        def __init__( self , metric_id ) -> None:
            # Only the ``id`` attribute is read by datasets' inspect helpers.
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id ) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics( self ):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() )
@pytest.mark.parametrize(
    "func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] )
def lowerCAmelCase ( func , args , tmp_path ) -> None:
    """Each deprecated metric entry point should emit a FutureWarning.

    Fixes: the original repeated one parameter name five times (a
    SyntaxError) and referenced the undefined names ``args``, ``func``,
    ``tmp_path`` and the warning class; parameters now match the
    ``parametrize`` ids and the ``tmp_path`` built-in fixture.
    """
    if "tmp_path" in args:
        # Substitute the placeholder string with the real tmp_path fixture.
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args )
    with pytest.warns(FutureWarning , match="https://huggingface.co/docs/evaluate" ):
        func(*args )
| 703 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
# Lazy import structure for the GPT-Neo model family: heavy submodules are
# only imported on first attribute access, or eagerly for type checkers.
# Fixes: the original bound the dict to a throwaway name, clobbered it with
# list reassignments instead of adding keys, referenced the undefined name
# ``_import_structure`` in the _LazyModule call, and never installed the
# lazy module into sys.modules.
_import_structure = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}

# PyTorch objects are only exported when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neo"] = [
        "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoForCausalLM",
        "GPTNeoForQuestionAnswering",
        "GPTNeoForSequenceClassification",
        "GPTNeoForTokenClassification",
        "GPTNeoModel",
        "GPTNeoPreTrainedModel",
        "load_tf_weights_in_gpt_neo",
    ]

# Flax objects are only exported when jax/flax are installed.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
        "FlaxGPTNeoForCausalLM",
        "FlaxGPTNeoModel",
        "FlaxGPTNeoPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Eager imports so static type checkers see the real symbols.
    from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neo import (
            GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoForCausalLM,
            GPTNeoForQuestionAnswering,
            GPTNeoForSequenceClassification,
            GPTNeoForTokenClassification,
            GPTNeoModel,
            GPTNeoPreTrainedModel,
            load_tf_weights_in_gpt_neo,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel

else:
    import sys

    # Install the lazy module so attribute access triggers the real imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 364 | 0 |
import qiskit
def A ( qubits , classical_bits ):
    """Measure qubit 0 of a fresh circuit on the Aer simulator and return
    the shot-count histogram.

    Fixes: the original repeated one parameter name (a SyntaxError) and
    referenced the undefined name ``_A`` throughout.

    Args:
        qubits: number of qubits in the circuit.
        classical_bits: number of classical bits in the circuit.
    """
    simulator = qiskit.Aer.get_backend("""aer_simulator""" )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Map the quantum measurement to the classical bits
    circuit.measure([0] , [0] )
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit , simulator , shots=1000 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )


if __name__ == "__main__":
    # Fix: the guard called the undefined name ``single_qubit_measure``.
    print(f'''Total count for various states are: {A(1, 1)}''')
def lowerCamelCase__ ( _A ):
    """Return the factorial of ``_A`` (1 for any value below 2)."""
    product = 1
    # Multiplying from 2 upward; 1 is the multiplicative identity anyway.
    for factor in range(2 , _A + 1 ):
        product *= factor
    return product
def lowerCamelCase__ ( _A ):
    """Return the sum of the decimal digits of a non-negative integer."""
    total = 0
    remaining = _A
    while remaining > 0:
        # Strip the last digit and accumulate it in a single divmod step.
        remaining , digit = divmod(remaining , 10 )
        total += digit
    return total
def lowerCamelCase__ ( _A = 100 ):
    """Project Euler 20: return the sum of the digits of ``_A``! (648 for 100!).

    Fix: the original called the undefined names ``factorial`` and
    ``split_and_add`` (the sibling helpers in this file all shadow one
    another's name), so the computation is done self-contained here.
    """
    fact = 1
    for i in range(2 , _A + 1 ):
        fact *= i
    return sum(int(digit ) for digit in str(fact ) )


if __name__ == "__main__":
    # Fix: the guard called the undefined name ``solution``.
    print(lowerCamelCase__(int(input("Enter the Number: ").strip())))
| 376 | 0 |
import argparse
import json
from tqdm import tqdm
def SCREAMING_SNAKE_CASE ( ):
    """Parse raw DPR training data into an evaluation-set file (questions)
    and a gold file (tab-joined positive-context titles).

    Fix: the original referenced the undefined name ``a_`` as the argparse
    ``type``, the json source, the tqdm argument and the join argument.
    """
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--src_path' , type=str , default='biencoder-nq-dev.json' , help='Path to raw DPR training data' , )
    parser.add_argument(
        '--evaluation_set' , type=str , help='where to store parsed evaluation_set file' , )
    parser.add_argument(
        '--gold_data_path' , type=str , help='where to store parsed gold_data_path file' , )
    args = parser.parse_args()

    with open(args.src_path , 'r' ) as src_file, open(args.evaluation_set , 'w' ) as eval_file, open(
        args.gold_data_path , 'w' ) as gold_file:
        dpr_records = json.load(src_file )
        for dpr_record in tqdm(dpr_records ):
            question = dpr_record['question']
            contexts = [context['title'] for context in dpr_record['positive_ctxs']]
            eval_file.write(question + '\n' )
            gold_file.write('\t'.join(contexts ) + '\n' )


if __name__ == "__main__":
    # Fix: the guard called the undefined name ``main``.
    SCREAMING_SNAKE_CASE()
| 717 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure for the RoFormer model family: heavy submodules are
# only imported on first attribute access, or eagerly for type checkers.
# Fixes: the original bound the dict to a throwaway name, clobbered it with
# list reassignments instead of adding keys, referenced the undefined name
# ``_import_structure`` in the _LazyModule call, and never installed the
# lazy module into sys.modules.
_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}

# The fast tokenizer needs the `tokenizers` library.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]

# PyTorch objects are only exported when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roformer"] = [
        "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoFormerForCausalLM",
        "RoFormerForMaskedLM",
        "RoFormerForMultipleChoice",
        "RoFormerForQuestionAnswering",
        "RoFormerForSequenceClassification",
        "RoFormerForTokenClassification",
        "RoFormerLayer",
        "RoFormerModel",
        "RoFormerPreTrainedModel",
        "load_tf_weights_in_roformer",
    ]

# TensorFlow objects are only exported when tensorflow is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roformer"] = [
        "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRoFormerForCausalLM",
        "TFRoFormerForMaskedLM",
        "TFRoFormerForMultipleChoice",
        "TFRoFormerForQuestionAnswering",
        "TFRoFormerForSequenceClassification",
        "TFRoFormerForTokenClassification",
        "TFRoFormerLayer",
        "TFRoFormerModel",
        "TFRoFormerPreTrainedModel",
    ]

# Flax objects are only exported when jax/flax are installed.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roformer"] = [
        "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxRoFormerForMaskedLM",
        "FlaxRoFormerForMultipleChoice",
        "FlaxRoFormerForQuestionAnswering",
        "FlaxRoFormerForSequenceClassification",
        "FlaxRoFormerForTokenClassification",
        "FlaxRoFormerModel",
        "FlaxRoFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Eager imports so static type checkers see the real symbols.
    from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
    from .tokenization_roformer import RoFormerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roformer_fast import RoFormerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roformer import (
            ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoFormerForCausalLM,
            RoFormerForMaskedLM,
            RoFormerForMultipleChoice,
            RoFormerForQuestionAnswering,
            RoFormerForSequenceClassification,
            RoFormerForTokenClassification,
            RoFormerLayer,
            RoFormerModel,
            RoFormerPreTrainedModel,
            load_tf_weights_in_roformer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roformer import (
            TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForMultipleChoice,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerLayer,
            TFRoFormerModel,
            TFRoFormerPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roformer import (
            FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerModel,
            FlaxRoFormerPreTrainedModel,
        )

else:
    import sys

    # Install the lazy module so attribute access triggers the real imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 490 | 0 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def lowercase__ ( idx : int ):
    """Return (HF name, original name) weight-rename pairs for the patch
    embedding of CvT stage ``idx``.

    Fix: the original's parameter name did not match the ``idx`` used in
    the body (NameError).
    """
    hf_prefix = f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings'''
    orig_prefix = f'''stage{idx}.patch_embed'''
    embed = []
    for hf_tail, orig_tail in (
        ('projection.weight', 'proj.weight'),
        ('projection.bias', 'proj.bias'),
        ('normalization.weight', 'norm.weight'),
        ('normalization.bias', 'norm.bias'),
    ):
        embed.append((f'''{hf_prefix}.{hf_tail}''', f'''{orig_prefix}.{orig_tail}''') )
    return embed
def lowercase__ ( idx : int , cnt : int ):
    """Return (HF name, original name) weight-rename pairs for attention
    block ``cnt`` of CvT stage ``idx``, preserving the original emission
    order.

    Fixes: the original repeated one parameter name (a SyntaxError) and
    referenced the undefined names ``idx``/``cnt``; the 150-line append
    sequence is also compressed into loops producing the same 34 pairs.
    """
    hf_prefix = f'''cvt.encoder.stages.{idx}.layers.{cnt}'''
    orig_prefix = f'''stage{idx}.blocks.{cnt}'''
    attention_weights = []
    # Depthwise conv projections for q/k/v: conv weight + 5 batch-norm buffers each.
    for proj, short in (('query', 'q'), ('key', 'k'), ('value', 'v')):
        attention_weights.append(
            (
                f'''{hf_prefix}.attention.attention.convolution_projection_{proj}.convolution_projection.convolution.weight''',
                f'''{orig_prefix}.attn.conv_proj_{short}.conv.weight''',
            ) )
        for bn_param in ('weight', 'bias', 'running_mean', 'running_var', 'num_batches_tracked'):
            attention_weights.append(
                (
                    f'''{hf_prefix}.attention.attention.convolution_projection_{proj}.convolution_projection.normalization.{bn_param}''',
                    f'''{orig_prefix}.attn.conv_proj_{short}.bn.{bn_param}''',
                ) )
    # Linear q/k/v projections.
    for proj, short in (('query', 'q'), ('key', 'k'), ('value', 'v')):
        for param in ('weight', 'bias'):
            attention_weights.append(
                (f'''{hf_prefix}.attention.attention.projection_{proj}.{param}''', f'''{orig_prefix}.attn.proj_{short}.{param}''') )
    # Attention output projection.
    for param in ('weight', 'bias'):
        attention_weights.append(
            (f'''{hf_prefix}.attention.output.dense.{param}''', f'''{orig_prefix}.attn.proj.{param}''') )
    # MLP (fc1 / fc2) weights.
    for param in ('weight', 'bias'):
        attention_weights.append(
            (f'''{hf_prefix}.intermediate.dense.{param}''', f'''{orig_prefix}.mlp.fc1.{param}''') )
    for param in ('weight', 'bias'):
        attention_weights.append(
            (f'''{hf_prefix}.output.dense.{param}''', f'''{orig_prefix}.mlp.fc2.{param}''') )
    # Pre/post layer norms.
    for param in ('weight', 'bias'):
        attention_weights.append(
            (f'''{hf_prefix}.layernorm_before.{param}''', f'''{orig_prefix}.norm1.{param}''') )
    for param in ('weight', 'bias'):
        attention_weights.append(
            (f'''{hf_prefix}.layernorm_after.{param}''', f'''{orig_prefix}.norm2.{param}''') )
    return attention_weights
def lowercase__ ( idx : int ):
    """Return the cls-token rename pair for CvT stage ``idx``.

    The checkpoint stores the token under the literal name
    ``stage2.cls_token`` (only stage 2 has one), hence the hard-coded
    right-hand side.

    Fix: the original's parameter name did not match the ``idx`` used in
    the body (NameError).
    """
    token = []
    token.append((f'''cvt.encoder.stages.{idx}.cls_token''', """stage2.cls_token""") )
    return token
def lowercase__ ( ):
    """Return rename pairs for the final layernorm and classifier head."""
    return [
        ("""layernorm.weight""", """norm.weight"""),
        ("""layernorm.bias""", """norm.bias"""),
        ("""classifier.weight""", """head.weight"""),
        ("""classifier.bias""", """head.bias"""),
    ]
def lowercase__ ( cvt_model , image_size , cvt_file_name , pytorch_dump_folder_path ):
    """Convert an original CvT checkpoint into HF format and save it.

    Fixes: the original repeated one parameter name four times (a
    SyntaxError), lost several assignment targets, and passed the ImageNet
    label maps to ``CvtConfig`` under the wrong keyword names.

    Args:
        cvt_model: model name, e.g. ``cvt-13``/``cvt-21``/``cvt-w24``
            (depth is inferred from characters 4:6 of the name).
        image_size: input resolution stored on the image processor.
        cvt_file_name: path to the original ``.pth`` checkpoint.
        pytorch_dump_folder_path: output directory.
    """
    img_labels_file = """imagenet-1k-id2label.json"""
    num_labels = 1000
    repo_id = """huggingface/label-files"""
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , img_labels_file , repo_type="""dataset""" ) ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels , id2label=id2label , label2id=label2id )

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config )
    image_processor = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" )
    # assumes the processor stores its resolution under size["shortest_edge"]
    # — TODO confirm against the upstream conversion script.
    image_processor.size["""shortest_edge"""] = image_size
    original_weights = torch.load(cvt_file_name , map_location=torch.device("""cpu""" ) )

    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth ) ):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx )
        list_of_state_dict = list_of_state_dict + embeddings(idx )
        for cnt in range(config.depth[idx] ):
            list_of_state_dict = list_of_state_dict + attention(idx , cnt )
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg )
    # Each pair is (HF name, original name): copy the original tensor under
    # the new HF key.
    for i in range(len(list_of_state_dict ) ):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights )
    model.save_pretrained(pytorch_dump_folder_path )
    image_processor.save_pretrained(pytorch_dump_folder_path )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    # Fixes: argparse was assigned to a throwaway name while the undefined
    # names ``parser``/``args``/``convert_cvt_checkpoint`` were referenced.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--cvt_model',
        default='cvt-w24',
        type=str,
        help='Name of the cvt model you\'d like to convert.',
    )
    parser.add_argument(
        '--image_size',
        default=384,
        type=int,
        help='Input Image Size',
    )
    parser.add_argument(
        '--cvt_file_name',
        default=r'cvtmodels\CvT-w24-384x384-IN-22k.pth',
        type=str,
        help='Input Image Size',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    args = parser.parse_args()
    # ``lowercase__`` here resolves to the conversion entry point (the last
    # definition of that name in this module).
    lowercase__(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 566 |
'''simple docstring'''
def lowercase__ ( list_data : list , key : int , left : int = 0 , right : int = 0 ):
    """Two-ended linear search, recursing inward from both ends.

    Returns the index of ``key`` in ``list_data`` or -1 if absent.

    Fixes: the original repeated one parameter name four times (a
    SyntaxError) and recursed through the undefined name ``search``.

    >>> lowercase__([1, 2, 4, 5, 3], 4)
    2
    >>> lowercase__([1, 2, 4, 5, 3], 6)
    -1
    """
    # A falsy (0) right means "use the last index".
    right = right or len(list_data ) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        # Shrink the window from both ends and recurse.
        return lowercase__(list_data , key , left + 1 , right - 1 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 566 | 1 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
# Lazy import structure: the processor module is only imported on first
# attribute access, or eagerly for static type checkers.
# Fixes: the _LazyModule call referenced the undefined name
# ``_import_structure`` and the lazy module was assigned to a throwaway
# name instead of being installed into sys.modules.
_import_structure = {'''processing_wav2vec2_with_lm''': ['''Wav2Vec2ProcessorWithLM''']}

if TYPE_CHECKING:
    from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 472 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
# Module-level constants for the XLNet SentencePiece tokenizer.
# NOTE(review): every assignment below rebinds the same obfuscated name, so
# each constant clobbers the previous one (logger, vocab map, url map, size
# map and segment ids cannot all be reachable) — confirm against upstream,
# where each has a distinct name.
_SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)

# File name expected for the SentencePiece model.
_SCREAMING_SNAKE_CASE : int = {'''vocab_file''': '''spiece.model'''}

# Download URLs for the pretrained vocabularies.
_SCREAMING_SNAKE_CASE : int = {
    '''vocab_file''': {
        '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
        '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
    }
}

# Max input sizes (None: no fixed positional limit).
_SCREAMING_SNAKE_CASE : List[Any] = {
    '''xlnet-base-cased''': None,
    '''xlnet-large-cased''': None,
}

# Segments (not really needed)
_SCREAMING_SNAKE_CASE : Optional[int] = 0
_SCREAMING_SNAKE_CASE : Optional[Any] = 1
_SCREAMING_SNAKE_CASE : int = 2
_SCREAMING_SNAKE_CASE : str = 3
_SCREAMING_SNAKE_CASE : str = 4
class UpperCAmelCase__ ( A__ ):
    """XLNet tokenizer based on SentencePiece.

    Fixes applied to the obfuscated original: several method signatures
    repeated a single parameter name (SyntaxErrors), ``__getstate__``
    returned the undefined name ``state``, ``_decode`` popped from an
    undefined ``kwargs``, and ``convert_tokens_to_string`` replaced the
    token list itself instead of the SentencePiece underline marker. The
    upstream XLNetTokenizer behavior is restored; hook-method names follow
    the PreTrainedTokenizer contract so the base class can call them.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # XLNet pads on the left.
    padding_side = "left"

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        # XLNet uses segment id 3 for padding.
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        """Number of pieces in the SentencePiece model."""
        return len(self.sp_model)

    def get_vocab(self):
        """Return the token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload
        # from the vocab file on unpickle.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        """Normalize raw text (whitespace, quotes, accents, casing)."""
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text):
        """Tokenize into sentence pieces, splitting trailing-comma digits."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            # Split pieces like "9," so the digit and comma become separate tokens.
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Convert an index (int) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Join pieces and turn the SentencePiece underline into spaces."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _decode(
        self,
        token_ids,
        skip_special_tokens=False,
        clean_up_tokenization_spaces=None,
        spaces_between_special_tokens=True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """An XLNet sequence has the format ``A <sep> <cls>`` or
        ``A <sep> B <sep> <cls>``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Segment ids: 0 for the first sequence, 1 for the second, 2 for <cls>."""
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write the SentencePiece model file into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, '''wb''') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 472 | 1 |
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
# Apex provides the `amp` mixed-precision backend used by the trainer below.
if is_apex_available():
    from apex import amp
# Native AMP (torch.cuda.amp) exists from PyTorch 1.6 onwards.
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'):
    SCREAMING_SNAKE_CASE = True
    from torch.cuda.amp import autocast
SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
# Fix(review): the rest of this module logs through the name `logger`
# (e.g. `logger.info(...)` inside the training entry point), but the original
# assignment was renamed to SCREAMING_SNAKE_CASE by obfuscation, so `logger`
# was never bound (NameError). Bind it explicitly; the old name is kept for
# backward compatibility.
logger = logging.getLogger(__name__)
def a (default=None , metadata=None ):
    """Declare a dataclass field holding a (possibly mutable) list default.

    Args:
        default: the default value; wrapped in a factory because mutable
            defaults are not allowed directly on dataclass fields.
        metadata: optional metadata dict (e.g. argparse help text).

    Returns:
        A `dataclasses.Field` produced by `field(...)`.
    """
    # Fix(review): the original declared the same obfuscated name for both
    # parameters (SyntaxError) while the body read the unbound name
    # `default`; the intended names (used by callers as keywords) are restored.
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class __UpperCAmelCase :
    """Model/config CLI arguments for Wav2Vec2 CTC fine-tuning.

    NOTE(review): obfuscation damage — every field below is bound to the same
    name `_lowerCamelCase` (later assignments shadow earlier ones; intended:
    model_name_or_path, cache_dir, freeze_feature_extractor,
    attention_dropout, activation_dropout, hidden_dropout, feat_proj_dropout,
    mask_time_prob, layerdrop) and several defaults reference the unbound
    name `__A` (presumably None/False originally). The class body cannot be
    evaluated as written; restore the original names before use.
    """
    # Intended: model_name_or_path (required, no default).
    _lowerCamelCase = field(
        metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    # Intended: cache_dir — `__A` is undefined here; TODO confirm original default.
    _lowerCamelCase = field(
        default=__A , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
    # Intended: freeze_feature_extractor.
    _lowerCamelCase = field(
        default=__A , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""} )
    _lowerCamelCase = field(
        default=0.1 , metadata={"""help""": """The dropout ratio for the attention probabilities."""} )
    _lowerCamelCase = field(
        default=0.1 , metadata={"""help""": """The dropout ratio for activations inside the fully connected layer."""} )
    _lowerCamelCase = field(
        default=0.1 , metadata={
            """help""": """The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler."""
        } , )
    _lowerCamelCase = field(
        default=0.1 , metadata={"""help""": """The dropout probabilitiy for all 1D convolutional layers in feature extractor."""} , )
    # Intended: mask_time_prob (SpecAugment time-masking probability).
    _lowerCamelCase = field(
        default=0.05 , metadata={
            """help""": (
                """Propability of each feature vector along the time axis to be chosen as the start of the vector"""
                """span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"""
                """vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."""
            )
        } , )
    _lowerCamelCase = field(default=0.0 , metadata={"""help""": """The LayerDrop probability."""} )
@dataclass
class __UpperCAmelCase :
    """Dataset/preprocessing CLI arguments for Common Voice fine-tuning.

    NOTE(review): obfuscation damage — every field is bound to the same name
    `_lowerCamelCase` (intended: dataset_config_name, train_split_name,
    overwrite_cache, preprocessing_num_workers, max_train_samples,
    max_val_samples, chars_to_ignore) and several defaults reference the
    unbound name `__A`. Restore the original names before use.
    """
    _lowerCamelCase = field(
        default=__A , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
    _lowerCamelCase = field(
        default="""train+validation""" , metadata={
            """help""": """The name of the training data set split to use (via the datasets library). Defaults to 'train'"""
        } , )
    _lowerCamelCase = field(
        default=__A , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
    _lowerCamelCase = field(
        default=__A , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
    _lowerCamelCase = field(
        default=__A , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of training examples to this """
                """value if set."""
            )
        } , )
    _lowerCamelCase = field(
        default=__A , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of validation examples to this """
                """value if set."""
            )
        } , )
    # Punctuation/odd characters stripped from transcripts before training.
    _lowerCamelCase = list_field(
        default=[""",""", """?""", """.""", """!""", """-""", """;""", """:""", """\"\"""", """%""", """'""", """\"""", """�"""] , metadata={"""help""": """A list of characters to remove from the transcripts."""} , )
@dataclass
class __UpperCAmelCase :
    """Data collator that dynamically pads audio inputs and CTC labels.

    NOTE(review): obfuscation damage — the six dataclass attributes are all
    bound to the same name `_lowerCamelCase` (intended: processor, padding,
    max_length, max_length_labels, pad_to_multiple_of,
    pad_to_multiple_of_labels), `__call__`'s parameter is `__A` while the
    body iterates the unbound name `features`, and every local is `__a`, so
    later reads (`labels_batch`, `labels`, `batch`) are unbound as written.
    Restore the original names before use.
    """
    _lowerCamelCase = 42
    _lowerCamelCase = True
    _lowerCamelCase = None
    _lowerCamelCase = None
    _lowerCamelCase = None
    _lowerCamelCase = None
    def __call__( self , __A ):
        # split inputs and labels since they have to be of different lenghts and need
        # different padding methods
        __a = [{"""input_values""": feature["""input_values"""]} for feature in features]
        __a = [{"""input_ids""": feature["""labels"""]} for feature in features]
        # Pad the audio features and the tokenized labels separately.
        __a = self.processor.pad(
            __A , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , )
        __a = self.processor.pad(
            labels=__A , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors="""pt""" , )
        # replace padding with -100 to ignore loss correctly
        __a = labels_batch["""input_ids"""].masked_fill(labels_batch.attention_mask.ne(1 ) , -100 )
        __a = labels
        return batch
class __UpperCAmelCase ( __A ):
    """CTC-aware Trainer whose training step supports AMP, Apex and DeepSpeed.

    NOTE(review): obfuscation damage — the base class is the unbound name
    `__A` (presumably `Trainer`; confirm), the method below repeats the
    parameter name `__A` twice (SyntaxError: duplicate argument; intended
    `model` and `inputs`, which the body reads), and every local is `__a`,
    so `loss` is unbound as written. Restore the original names before use.
    """
    def snake_case_ ( self , __A , __A ):
        # training_step(): one forward + backward pass; returns detached loss.
        model.train()
        __a = self._prepare_inputs(__A )
        if self.use_amp:
            # Native AMP: run the forward pass under autocast.
            with autocast():
                __a = self.compute_loss(__A , __A )
        else:
            __a = self.compute_loss(__A , __A )
        if self.args.n_gpu > 1:
            # DataParallel returns one loss per GPU; reduce per the model's
            # configured CTC loss reduction.
            if model.module.config.ctc_loss_reduction == "mean":
                __a = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                __a = loss.sum() / (inputs["""labels"""] >= 0).sum()
            else:
                raise ValueError(f'''{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']''' )
        if self.args.gradient_accumulation_steps > 1:
            __a = loss / self.args.gradient_accumulation_steps
        # Backward through whichever mixed-precision backend is active.
        if self.use_amp:
            self.scaler.scale(__A ).backward()
        elif self.use_apex:
            with amp.scale_loss(__A , self.optimizer ) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(__A )
        else:
            loss.backward()
        return loss.detach()
def a ():
    """Fine-tune Wav2Vec2 with CTC on the Common Voice dataset.

    Intended pipeline: parse args -> detect resume checkpoint -> set up
    logging -> load and clean Common Voice -> build a character vocabulary
    and tokenizer -> resample/featurize audio -> train with the CTC trainer
    -> evaluate with WER.

    NOTE(review): obfuscation damage — every local in this function was
    renamed to `__a` (and inner-function parameters to `lowerCAmelCase__`),
    so successive assignments clobber one another and many later reads
    (`parser`, `training_args`, `last_checkpoint`, `train_dataset`,
    `vocab_dict`, `trainer`, ...) are unbound as written. The original local
    names must be restored before this can run.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    __a = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        __a , __a , __a = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        __a , __a , __a = parser.parse_args_into_dataclasses()
    # Detecting last checkpoint.
    __a = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        __a = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
                """Use --overwrite_output_dir to overcome.""" )
        elif last_checkpoint is not None:
            logger.info(
                f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
                """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
    # Setup logging
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
    # Log on each process the small summary:
    logger.warning(
        f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
    logger.info("""Training/evaluation parameters %s""" , lowerCAmelCase__ )
    # Set seed before initializing model.
    set_seed(training_args.seed )
    # Get the datasets:
    __a = datasets.load_dataset(
        """common_voice""" , data_args.dataset_config_name , split=data_args.train_split_name )
    __a = datasets.load_dataset("""common_voice""" , data_args.dataset_config_name , split="""test""" )
    # Create and save tokenizer
    # Character class of all transcript characters that must be stripped.
    __a = f'''[{''.join(data_args.chars_to_ignore )}]'''
    def remove_special_characters(lowerCAmelCase__ ):
        # Lower-case the sentence and drop the ignored characters.
        __a = re.sub(lowerCAmelCase__ , """""" , batch["""sentence"""] ).lower() + """ """
        return batch
    __a = train_dataset.map(lowerCAmelCase__ , remove_columns=["""sentence"""] )
    __a = eval_dataset.map(lowerCAmelCase__ , remove_columns=["""sentence"""] )
    def extract_all_chars(lowerCAmelCase__ ):
        # Collect the set of characters appearing in the batch's text.
        __a = """ """.join(batch["""text"""] )
        __a = list(set(lowerCAmelCase__ ) )
        return {"vocab": [vocab], "all_text": [all_text]}
    __a = train_dataset.map(
        lowerCAmelCase__ , batched=lowerCAmelCase__ , batch_size=-1 , keep_in_memory=lowerCAmelCase__ , remove_columns=train_dataset.column_names , )
    # NOTE(review): this second map runs on `train_dataset` but removes
    # `eval_dataset.column_names` — presumably it was meant to map over the
    # eval set; confirm against the original script.
    __a = train_dataset.map(
        lowerCAmelCase__ , batched=lowerCAmelCase__ , batch_size=-1 , keep_in_memory=lowerCAmelCase__ , remove_columns=eval_dataset.column_names , )
    # Union of train/eval character sets -> char-to-id vocabulary.
    __a = list(set(vocab_train["""vocab"""][0] ) | set(vocab_test["""vocab"""][0] ) )
    __a = {v: k for k, v in enumerate(lowerCAmelCase__ )}
    # Replace the space character by the word-delimiter token "|".
    __a = vocab_dict[""" """]
    del vocab_dict[" "]
    __a = len(lowerCAmelCase__ )
    __a = len(lowerCAmelCase__ )
    with open("""vocab.json""" , """w""" ) as vocab_file:
        json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    __a = WavaVecaCTCTokenizer(
        """vocab.json""" , unk_token="""[UNK]""" , pad_token="""[PAD]""" , word_delimiter_token="""|""" , )
    __a = WavaVecaFeatureExtractor(
        feature_size=1 , sampling_rate=16_000 , padding_value=0.0 , do_normalize=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ )
    __a = WavaVecaProcessor(feature_extractor=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ )
    __a = WavaVecaForCTC.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction="""mean""" , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
    if data_args.max_train_samples is not None:
        __a = min(len(lowerCAmelCase__ ) , data_args.max_train_samples )
        __a = train_dataset.select(range(lowerCAmelCase__ ) )
    if data_args.max_val_samples is not None:
        __a = eval_dataset.select(range(data_args.max_val_samples ) )
    # Common Voice audio is 48 kHz; Wav2Vec2 expects 16 kHz.
    __a = torchaudio.transforms.Resample(48_000 , 16_000 )
    # Preprocessing the datasets.
    # We need to read the aduio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(lowerCAmelCase__ ):
        __a , __a = torchaudio.load(batch["""path"""] )
        __a = resampler(lowerCAmelCase__ ).squeeze().numpy()
        __a = 16_000
        __a = batch["""text"""]
        return batch
    __a = train_dataset.map(
        lowerCAmelCase__ , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
    __a = eval_dataset.map(
        lowerCAmelCase__ , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
    def prepare_dataset(lowerCAmelCase__ ):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch["""sampling_rate"""] ) ) == 1
        ), f'''Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}.'''
        __a = processor(
            audio=batch["""speech"""] , text=batch["""target_text"""] , sampling_rate=batch["""sampling_rate"""][0] )
        batch.update(lowerCAmelCase__ )
        return batch
    __a = train_dataset.map(
        lowerCAmelCase__ , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=lowerCAmelCase__ , num_proc=data_args.preprocessing_num_workers , )
    __a = eval_dataset.map(
        lowerCAmelCase__ , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=lowerCAmelCase__ , num_proc=data_args.preprocessing_num_workers , )
    # Metric
    __a = datasets.load_metric("""wer""" )
    def compute_metrics(lowerCAmelCase__ ):
        # Greedy-decode the logits and compare against the references.
        __a = pred.predictions
        __a = np.argmax(lowerCAmelCase__ , axis=-1 )
        __a = processor.tokenizer.pad_token_id
        __a = processor.batch_decode(lowerCAmelCase__ )
        # we do not want to group tokens when computing the metrics
        __a = processor.batch_decode(pred.label_ids , group_tokens=lowerCAmelCase__ )
        __a = wer_metric.compute(predictions=lowerCAmelCase__ , references=lowerCAmelCase__ )
        return {"wer": wer}
    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()
    # Data collator
    __a = DataCollatorCTCWithPadding(processor=lowerCAmelCase__ , padding=lowerCAmelCase__ )
    # Initialize our Trainer
    __a = CTCTrainer(
        model=lowerCAmelCase__ , data_collator=lowerCAmelCase__ , args=lowerCAmelCase__ , compute_metrics=lowerCAmelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            __a = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path ):
            __a = model_args.model_name_or_path
        else:
            __a = None
        # Save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank ):
            processor.save_pretrained(training_args.output_dir )
        __a = trainer.train(resume_from_checkpoint=lowerCAmelCase__ )
        trainer.save_model()
        __a = train_result.metrics
        __a = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCAmelCase__ )
        )
        __a = min(lowerCAmelCase__ , len(lowerCAmelCase__ ) )
        trainer.log_metrics("""train""" , lowerCAmelCase__ )
        trainer.save_metrics("""train""" , lowerCAmelCase__ )
        trainer.save_state()
    # Evaluation
    __a = {}
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""" )
        __a = trainer.evaluate()
        __a = data_args.max_val_samples if data_args.max_val_samples is not None else len(lowerCAmelCase__ )
        __a = min(lowerCAmelCase__ , len(lowerCAmelCase__ ) )
        trainer.log_metrics("""eval""" , lowerCAmelCase__ )
        trainer.save_metrics("""eval""" , lowerCAmelCase__ )
    return results
if __name__ == "__main__":
    # Fix(review): this module's entry-point function is named `a`, not
    # `main`, so calling `main()` raised NameError; invoke `a` directly.
    a()
| 99 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
# Make the repo-local `utils` directory importable so the custom test modules
# below can be loaded.
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_tokenization import CustomTokenizer  # noqa E402
if is_tokenizers_available():
    # The fast tokenizer variant requires the `tokenizers` backend.
    from test_module.custom_tokenization_fast import CustomTokenizerFast
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Regression tests for offline/cached tokenizer loading behavior.

    NOTE(review): obfuscation damage — every test method was renamed to
    `_lowercase`, so later definitions shadow earlier ones and unittest will
    not discover any of them (names must be distinct and start with `test_`).
    Also, the locals were collapsed to `snake_case__` and several call sites
    reference the unbound name `__A` (originally the mock response object).
    """
    def _lowercase ( self : Union[str, Any] ):
        # Cached model must still load when the Hub returns HTTP 500.
        # A mock response for an HTTP head request to emulate server down
        snake_case__ : List[str] = mock.Mock()
        snake_case__ : Optional[int] = 5_0_0
        snake_case__ : int = {}
        snake_case__ : List[Any] = HTTPError
        snake_case__ : List[Any] = {}
        # Download this model to make sure it's in the cache.
        snake_case__ : List[Any] = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request" , return_value=__A ) as mock_head:
            snake_case__ : List[Any] = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
            # This check we did call the fake head request
            mock_head.assert_called()
    @require_tokenizers
    def _lowercase ( self : Dict ):
        # Same offline-resilience check for a fast tokenizer.
        # A mock response for an HTTP head request to emulate server down
        snake_case__ : Optional[Any] = mock.Mock()
        snake_case__ : str = 5_0_0
        snake_case__ : Union[str, Any] = {}
        snake_case__ : Optional[int] = HTTPError
        snake_case__ : List[str] = {}
        # Download this model to make sure it's in the cache.
        snake_case__ : Dict = GPTaTokenizerFast.from_pretrained("gpt2" )
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request" , return_value=__A ) as mock_head:
            snake_case__ : str = GPTaTokenizerFast.from_pretrained("gpt2" )
            # This check we did call the fake head request
            mock_head.assert_called()
    def _lowercase ( self : Tuple ):
        # This test is for deprecated behavior and can be removed in v5
        try:
            snake_case__ : int = tempfile.mktemp()
            with open(__A , "wb" ) as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" , __A )
            snake_case__ : Optional[int] = AlbertTokenizer.from_pretrained(__A )
        finally:
            os.remove(__A )
        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("tokenizer.json" ):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("tokenizer.json" , "wb" ) as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json" , __A )
            snake_case__ : List[str] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size , 1_0_0_0 )
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("tokenizer.json" )
    def _lowercase ( self : Tuple ):
        # This test is for deprecated behavior and can be removed in v5
        snake_case__ : int = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" )
@is_staging_test
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Push/pull round-trip tests for tokenizers against the staging Hub.

    NOTE(review): obfuscation damage — the test/setup methods are all named
    `_lowercase` (later definitions shadow earlier ones; unittest will not
    discover them), locals are collapsed to `snake_case__`, and many call
    sites reference the unbound name `__A` (originally the temp-dir path,
    file path, or a boolean flag). Restore the original names before use.
    """
    # Minimal WordPiece-style vocabulary used to build throwaway tokenizers.
    a_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
    @classmethod
    def _lowercase ( cls : str ):
        # setUpClass: authenticate against the staging Hub once.
        snake_case__ : Union[str, Any] = TOKEN
        HfFolder.save_token(__A )
    @classmethod
    def _lowercase ( cls : str ):
        # tearDownClass: best-effort cleanup of the repos created below.
        try:
            delete_repo(token=cls._token , repo_id="test-tokenizer" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="valid_org/test-tokenizer-org" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="test-dynamic-tokenizer" )
        except HTTPError:
            pass
    def _lowercase ( self : Tuple ):
        # Round-trip: push a user-namespace tokenizer, reload, compare vocabs.
        with tempfile.TemporaryDirectory() as tmp_dir:
            snake_case__ : Union[str, Any] = os.path.join(__A , "vocab.txt" )
            with open(__A , "w" , encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            snake_case__ : int = BertTokenizer(__A )
        tokenizer.push_to_hub("test-tokenizer" , use_auth_token=self._token )
        snake_case__ : Any = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' )
        self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
        # Reset repo
        delete_repo(token=self._token , repo_id="test-tokenizer" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(__A , repo_id="test-tokenizer" , push_to_hub=__A , use_auth_token=self._token )
        snake_case__ : Any = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' )
        self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
    def _lowercase ( self : Dict ):
        # Same round-trip under an organization namespace.
        with tempfile.TemporaryDirectory() as tmp_dir:
            snake_case__ : Any = os.path.join(__A , "vocab.txt" )
            with open(__A , "w" , encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            snake_case__ : int = BertTokenizer(__A )
        tokenizer.push_to_hub("valid_org/test-tokenizer-org" , use_auth_token=self._token )
        snake_case__ : Union[str, Any] = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
        self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
        # Reset repo
        delete_repo(token=self._token , repo_id="valid_org/test-tokenizer-org" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                __A , repo_id="valid_org/test-tokenizer-org" , push_to_hub=__A , use_auth_token=self._token )
        snake_case__ : int = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
        self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
    @require_tokenizers
    def _lowercase ( self : List[str] ):
        # Dynamic (trust_remote_code) custom tokenizer classes, slow and fast.
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            snake_case__ : List[Any] = os.path.join(__A , "vocab.txt" )
            with open(__A , "w" , encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            snake_case__ : Optional[int] = CustomTokenizer(__A )
            # No fast custom tokenizer
            tokenizer.push_to_hub("test-dynamic-tokenizer" , use_auth_token=self._token )
        snake_case__ : Tuple = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=__A )
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizer" )
        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            snake_case__ : Any = os.path.join(__A , "vocab.txt" )
            with open(__A , "w" , encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            snake_case__ : Optional[Any] = BertTokenizerFast.from_pretrained(__A )
            bert_tokenizer.save_pretrained(__A )
            snake_case__ : Union[str, Any] = CustomTokenizerFast.from_pretrained(__A )
            tokenizer.push_to_hub("test-dynamic-tokenizer" , use_auth_token=self._token )
        snake_case__ : Optional[Any] = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=__A )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizerFast" )
        snake_case__ : int = AutoTokenizer.from_pretrained(
            f'''{USER}/test-dynamic-tokenizer''' , use_fast=__A , trust_remote_code=__A )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizer" )
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Unit tests for the `Trie` used to split text on added special tokens.

    NOTE(review): obfuscation damage — every test method is named
    `_lowercase` (later definitions shadow earlier ones; unittest will not
    discover them), and locals were collapsed to `snake_case__`, so the
    bodies reference the unbound name `trie`. Restore the original names
    before use.
    """
    def _lowercase ( self : Any ):
        # Adding tokens builds the expected nested-dict structure.
        snake_case__ : List[Any] = Trie()
        trie.add("Hello 友達" )
        self.assertEqual(trie.data , {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}} )
        trie.add("Hello" )
        trie.data
        self.assertEqual(trie.data , {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}} )
    def _lowercase ( self : Union[str, Any] ):
        # split() leaves text untouched when no token matches, and cuts
        # around the longest matching tokens otherwise.
        snake_case__ : int = Trie()
        self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) , ["[CLS] This is a extra_id_100"] )
        trie.add("[CLS]" )
        trie.add("extra_id_1" )
        trie.add("extra_id_100" )
        self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) , ["[CLS]", " This is a ", "extra_id_100"] )
    def _lowercase ( self : Union[str, Any] ):
        # Single-character tokens split at the start and in the middle.
        snake_case__ : Tuple = Trie()
        trie.add("A" )
        self.assertEqual(trie.split("ABC" ) , ["A", "BC"] )
        self.assertEqual(trie.split("BCA" ) , ["BC", "A"] )
    def _lowercase ( self : List[str] ):
        # A prefix of a longer token must not steal the match.
        snake_case__ : Any = Trie()
        trie.add("TOKEN]" )
        trie.add("[SPECIAL_TOKEN]" )
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) , ["This is something ", "[SPECIAL_TOKEN]"] )
    def _lowercase ( self : Dict ):
        # Unrelated short tokens must not break the longer match.
        snake_case__ : Union[str, Any] = Trie()
        trie.add("A" )
        trie.add("P" )
        trie.add("[SPECIAL_TOKEN]" )
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) , ["This is something ", "[SPECIAL_TOKEN]"] )
    def _lowercase ( self : List[str] ):
        # Overlapping tokens: the leftmost-longest match wins.
        snake_case__ : List[Any] = Trie()
        trie.add("AB" )
        trie.add("B" )
        trie.add("C" )
        self.assertEqual(trie.split("ABC" ) , ["AB", "C"] )
    def _lowercase ( self : Optional[Any] ):
        snake_case__ : List[str] = Trie()
        trie.add("ABC" )
        trie.add("B" )
        trie.add("CD" )
        self.assertEqual(trie.split("ABCD" ) , ["ABC", "D"] )
    def _lowercase ( self : Optional[int] ):
        # Even if the offsets are wrong, we necessarily output correct string
        # parts.
        snake_case__ : Dict = Trie()
        snake_case__ : Tuple = trie.cut_text("ABC" , [0, 0, 2, 1, 2, 3] )
        self.assertEqual(__A , ["AB", "C"] )
| 297 | 0 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
A : List[str] = logging.getLogger(__name__)
@dataclass
class UpperCamelCase( _a ):
    '''Seq2seq-specific extension of the HF TrainingArguments.

    NOTE(review): obfuscation damage — the base class `_a` is an unbound
    name (presumably `TrainingArguments`, imported above; confirm), and every
    field is bound to the same name `snake_case_` (intended: label_smoothing,
    sortish_sampler, predict_with_generate, adafactor, encoder_layerdrop,
    decoder_layerdrop, dropout, attention_dropout, lr_scheduler). Several
    defaults also reference the unbound `_a`. Restore the original names
    before use.
    '''
    snake_case_ : Optional[float] = field(
        default=0.0 , metadata={"""help""": """The label smoothing epsilon to apply (if not zero)."""} )
    snake_case_ : bool = field(default=_a , metadata={"""help""": """Whether to SortishSamler or not."""} )
    snake_case_ : bool = field(
        default=_a , metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} )
    snake_case_ : bool = field(default=_a , metadata={"""help""": """whether to use adafactor"""} )
    snake_case_ : Optional[float] = field(
        default=_a , metadata={"""help""": """Encoder layer dropout probability. Goes into model.config."""} )
    snake_case_ : Optional[float] = field(
        default=_a , metadata={"""help""": """Decoder layer dropout probability. Goes into model.config."""} )
    snake_case_ : Optional[float] = field(default=_a , metadata={"""help""": """Dropout probability. Goes into model.config."""} )
    snake_case_ : Optional[float] = field(
        default=_a , metadata={"""help""": """Attention dropout probability. Goes into model.config."""} )
    # Scheduler name must be one of the keys of `arg_to_scheduler`.
    snake_case_ : Optional[str] = field(
        default="""linear""" , metadata={"""help""": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"} , )
| 709 |
import os
from typing import Dict, List, Tuple, TypeVar, Union
# NOTE(review): obfuscation collapsed four distinct type aliases into the
# single name `A`, so each binding below overwrites the previous one and only
# the last survives at import time; restoring distinct alias names requires
# the original identifiers.
# Fix(review): the aliases reference `T`, which was never bound because the
# original `T = TypeVar('T')` was renamed to `A`; also the bogus evaluated
# annotation `Optional[int]` (Optional is not imported -> NameError) has been
# dropped.
T = TypeVar('T')  # generic element type used by the aliases below
A = T  # original first binding of `A` (immediately shadowed below)
# A homogeneous sequence: list or tuple of T.
A = Union[List[T], Tuple[T, ...]]
# A value, list of values, or string-keyed mapping of values.
A = Union[T, List[T], Dict[str, T]]
# Anything accepted as a filesystem path.
A = Union[str, bytes, os.PathLike]
| 473 | 0 |
import flax.linen as nn
import jax
import jax.numpy as jnp
class __magic_name__ ( nn.Module):
    '''Flax block: 2x nearest-neighbor upsampling followed by a 3x3 conv.

    NOTE(review): obfuscation damage — both dataclass attributes are bound
    to the same name `SCREAMING_SNAKE_CASE__` (intended: `out_channels` and
    `dtype`), `jnp.floataa` is not a real attribute (intended
    `jnp.float32`), and the locals were collapsed to `SCREAMING_SNAKE_CASE_`
    so the bodies read unbound names (`hidden_states`, `UpperCamelCase_`,
    `batch`, ...). Restore the original names before use.
    '''
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = 42
    SCREAMING_SNAKE_CASE__ : int = jnp.floataa
    def _A ( self: List[str] ):
        # setup(): post-upsample 3x3 convolution, stride 1, 1-pixel padding.
        SCREAMING_SNAKE_CASE_ = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
    def __call__( self: Union[str, Any] , _lowerCamelCase: Any ):
        # Resize H and W by 2 with nearest-neighbor, then convolve.
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = hidden_states.shape
        SCREAMING_SNAKE_CASE_ = jax.image.resize(
            UpperCamelCase_ , shape=(batch, height * 2, width * 2, channels) , method='''nearest''' , )
        SCREAMING_SNAKE_CASE_ = self.conv(UpperCamelCase_ )
        return hidden_states
class __magic_name__ ( nn.Module):
    '''Flax downsampling block: a single 3x3 convolution with stride 2.

    NOTE(review): obfuscation damage — the two attributes are both bound to
    `SCREAMING_SNAKE_CASE__` (intended: `out_channels` and `dtype`),
    `jnp.floataa` is not a real attribute (intended `jnp.float32`), and the
    `__call__` body reads the unbound names `UpperCamelCase_`/`hidden_states`
    instead of its parameter. Restore the original names before use.
    '''
    SCREAMING_SNAKE_CASE__ : List[str] = 42
    SCREAMING_SNAKE_CASE__ : Dict = jnp.floataa
    def _A ( self: str ):
        # setup(): strided 3x3 convolution that halves H and W.
        SCREAMING_SNAKE_CASE_ = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
    def __call__( self: str , _lowerCamelCase: Tuple ):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        SCREAMING_SNAKE_CASE_ = self.conv(UpperCamelCase_ )
        return hidden_states
class __magic_name__ ( nn.Module):
    '''Flax ResNet block: GroupNorm -> SiLU -> Conv, add time embedding,
    GroupNorm -> SiLU -> Dropout -> Conv, plus an optional 1x1 shortcut
    when channel counts differ.

    NOTE(review): obfuscation damage — the five attributes are all bound to
    `SCREAMING_SNAKE_CASE__` (intended: in_channels, out_channels,
    dropout_prob, use_nin_shortcut, dtype), `jnp.floataa` is not a real
    attribute (intended `jnp.float32`), `__call__` repeats the parameter
    name `_lowerCamelCase` three times (SyntaxError: duplicate argument;
    intended hidden_states, temb, deterministic), and the locals were
    collapsed so later reads (`out_channels`, `use_nin_shortcut`, `temb`,
    `residual`, ...) are unbound as written.
    '''
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = 42
    SCREAMING_SNAKE_CASE__ : Optional[Any] = None
    SCREAMING_SNAKE_CASE__ : List[str] = 0.0
    SCREAMING_SNAKE_CASE__ : Any = None
    SCREAMING_SNAKE_CASE__ : List[str] = jnp.floataa
    def _A ( self: str ):
        # setup(): norms, convs, the time-embedding projection, dropout and
        # (optionally) the 1x1 shortcut convolution.
        SCREAMING_SNAKE_CASE_ = self.in_channels if self.out_channels is None else self.out_channels
        SCREAMING_SNAKE_CASE_ = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
        SCREAMING_SNAKE_CASE_ = nn.Conv(
            UpperCamelCase_ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        SCREAMING_SNAKE_CASE_ = nn.Dense(UpperCamelCase_ , dtype=self.dtype )
        SCREAMING_SNAKE_CASE_ = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
        SCREAMING_SNAKE_CASE_ = nn.Dropout(self.dropout_prob )
        SCREAMING_SNAKE_CASE_ = nn.Conv(
            UpperCamelCase_ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        SCREAMING_SNAKE_CASE_ = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
        SCREAMING_SNAKE_CASE_ = None
        if use_nin_shortcut:
            SCREAMING_SNAKE_CASE_ = nn.Conv(
                UpperCamelCase_ , kernel_size=(1, 1) , strides=(1, 1) , padding='''VALID''' , dtype=self.dtype , )
    def __call__( self: List[Any] , _lowerCamelCase: Optional[Any] , _lowerCamelCase: int , _lowerCamelCase: Optional[int]=True ):
        # Forward: two conv stages with the time embedding added between
        # them, then the (possibly projected) residual connection.
        SCREAMING_SNAKE_CASE_ = hidden_states
        SCREAMING_SNAKE_CASE_ = self.norma(UpperCamelCase_ )
        SCREAMING_SNAKE_CASE_ = nn.swish(UpperCamelCase_ )
        SCREAMING_SNAKE_CASE_ = self.conva(UpperCamelCase_ )
        SCREAMING_SNAKE_CASE_ = self.time_emb_proj(nn.swish(UpperCamelCase_ ) )
        SCREAMING_SNAKE_CASE_ = jnp.expand_dims(jnp.expand_dims(UpperCamelCase_ , 1 ) , 1 )
        SCREAMING_SNAKE_CASE_ = hidden_states + temb
        SCREAMING_SNAKE_CASE_ = self.norma(UpperCamelCase_ )
        SCREAMING_SNAKE_CASE_ = nn.swish(UpperCamelCase_ )
        SCREAMING_SNAKE_CASE_ = self.dropout(UpperCamelCase_ , UpperCamelCase_ )
        SCREAMING_SNAKE_CASE_ = self.conva(UpperCamelCase_ )
        if self.conv_shortcut is not None:
            SCREAMING_SNAKE_CASE_ = self.conv_shortcut(UpperCamelCase_ )
        return hidden_states + residual
| 234 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def lowercase ( ode_func: Callable , y0: float , x0: float , step_size: float , x_end: float ) -> np.array:
    """Solve y' = ode_func(x, y), y(x0) = y0 on [x0, x_end] with the modified
    Euler (Heun) method.

    Args:
        ode_func: right-hand side f(x, y) of the ODE.
        y0: initial value y(x0).
        x0: initial abscissa.
        step_size: step size h (> 0).
        x_end: end of the integration interval.

    Returns:
        A 1-D numpy array of n + 1 approximated y values, where
        n = ceil((x_end - x0) / step_size).

    >>> lowercase(lambda x, y: y, 1.0, 0.0, 0.1, 0.2)[1]
    1.105
    """
    # Fix(review): the original declared the same obfuscated name for all
    # five parameters (SyntaxError: duplicate argument) and collapsed every
    # local into one name, so the algorithm could not run; the canonical
    # implementation is restored below.
    n = int(np.ceil((x_end - x0) / step_size ))
    y = np.zeros((n + 1,) )
    y[0] = y0
    x = x0
    for k in range(n ):
        # Predictor: one explicit Euler step.
        y_pred = y[k] + step_size * ode_func(x , y[k] )
        # Corrector: trapezoidal average of the slopes at both interval ends.
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x , y[k] ) + ode_func(x + step_size , y_pred ))
        )
        x += step_size
    return y
if __name__ == "__main__":
    # Run the doctest examples embedded in this module when executed directly.
    import doctest
    doctest.testmod()
| 490 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCamelCase (a__ , unittest.TestCase ):
    """Placeholder for fast tests of the ONNX Stable Diffusion inpaint pipeline.

    NOTE(review): the first base class is the obfuscated name `a__` —
    presumably `OnnxPipelineTesterMixin` imported above; confirm before use.
    """
    # FIXME: add fast tests
    pass
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCamelCase (unittest.TestCase ):
    """Nightly GPU integration tests for OnnxStableDiffusionInpaintPipeline."""

    @property
    def gpu_provider(self):
        # CUDA execution provider with a fixed 15 GB memory arena.
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        # Disable the memory-pattern optimization for deterministic memory use.
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inpainting_default_ddim(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inpainting_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        # Swap in an LMS scheduler to cover a second sampler configuration.
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| 47 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
UpperCAmelCase : List[str] = logging.get_logger(__name__)
# NOTE(review): this rebinds the same name as the logger above, discarding it —
# these were almost certainly two distinct names originally (logger vs. config map).
UpperCAmelCase : Dict = {
    'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class lowerCamelCase (PretrainedConfig ):
    """Configuration class for a SEW-D model.

    The defaults mirror the `asapp/sew-d-tiny-100k` checkpoint. Previously the
    base class and all attribute assignments were broken (undefined base,
    duplicate parameter names, values assigned to a throwaway local instead of
    `self`); this restores the working configuration contract.
    """

    model_type = "sew-d"
    # Legacy alias for the attribute name used by the previous revision.
    _lowercase = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        # Convolutional feature extractor.
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        # Transformer encoder.
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        """Overall stride of the convolutional feature extractor."""
        return functools.reduce(operator.mul, self.conv_stride, 1)

    # Backward-compatible alias for the previous property name.
    UpperCAmelCase_ = inputs_to_logits_ratio
| 47 | 1 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def a_():
    """Sanity-check Prim's algorithm on a fixed 9-node, 14-edge graph.

    Builds an undirected adjacency list, runs ``mst`` on it, and asserts that
    every edge of the known minimum spanning tree appears in the result in
    either orientation.
    """
    node_count, edge_count = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    adjacency = defaultdict(list)
    for node_a, node_b, cost in edges:
        # Undirected graph: record the edge in both directions.
        adjacency[node_a].append([node_b, cost])
        adjacency[node_b].append([node_a, cost])
    result = mst(adjacency)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
| 464 |
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class a :
    """Constant (histogram) stretch of a grayscale image.

    ``stretch`` builds a remapping table from the image histogram and rewrites
    the pixels in place; the original image is kept for side-by-side display.
    """

    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []  # per-intensity remapping table
        self.rem = 0
        self.L = 256  # number of gray levels
        self.sk = 0  # cumulative probability
        self.k = 0  # total pixel count
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        """Compute the stretch mapping from the histogram and apply it,
        writing the result to ``output_data/output.jpg``."""
        self.img = cva.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        counts, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(counts)
        for i in range(len(counts)):
            prk = counts[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                # NOTE(review): `last % last` is always 0 for last != 0 and
                # raises for last == 0 — looks like it may have been meant as
                # `last % 1`; preserved as-is to keep behavior unchanged.
                self.rem = int(last % last)
            # Round to the nearest integer gray level.
            self.rem = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(self.rem)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cva.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        """Plot the histogram of the (possibly stretched) image."""
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        """Display the stretched and original images for 5 seconds."""
        cva.imshow("Output-Image", self.img)
        cva.imshow("Input-Image", self.original_image)
        cva.waitKey(5_000)
        cva.destroyAllWindows()
if __name__ == "__main__":
    # Stretch the bundled sample image, then show histogram and images.
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = a()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
| 611 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Lazy-import structure for the BARTpho tokenizer; the tokenizer is only
# registered when sentencepiece is installed.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 575 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Fast CPU tests for CycleDiffusionPipeline built from tiny dummy components."""

    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build tiny UNet/VAE/CLIP components so the test runs in seconds."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            num_train_timesteps=1000,
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline inputs for the given device and seed."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            # MPS does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                # Store the half-precision module back into the components dict.
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Slow GPU integration tests for CycleDiffusionPipeline against the full checkpoint."""

    def tearDown(self):
        # Release GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1

    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
| 575 | 1 |
def _a ( SCREAMING_SNAKE_CASE__ : int ) -> bool:
'''simple docstring'''
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
    # Simple CLI: read an integer and report whether it is a perfect number.
    print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if _a(number) else 'not '}a Perfect Number.")
| 663 |
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class lowerCamelCase (ProcessorMixin ):
    """
    Processor that wraps an image processor and a tokenizer into a single
    Donut-style processor, forwarding calls to the appropriate component and
    decoding generated token markup into JSON via ``tokenajson``.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # Fall back to the deprecated feature_extractor if no image_processor given.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        """Preprocess ``images`` and/or tokenize ``text``; when both are given,
        the token ids are attached to the image inputs as ``labels``."""
        # For backward compatibility: inside `as_target_processor`, forward
        # everything to the currently selected component.
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Temporarily route ``__call__`` to the tokenizer (deprecated)."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def tokenajson(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a generated token string with ``<s_key>...</s_key>`` markup
        into a (possibly nested) JSON-like dict."""
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)

            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                # Unclosed key: drop the stray start token and continue.
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.tokenajson(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token):].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.tokenajson(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def feature_extractor_class(self):
        """Deprecated alias for ``image_processor_class``."""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        """Deprecated alias for ``image_processor``."""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 663 | 1 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCAmelCase__(tf_checkpoint_path, bert_config_file, pytorch_dump_path) -> None:
    """Convert a TensorFlow BERT checkpoint into a PyTorch state dict.

    Args:
        tf_checkpoint_path: Path to the TensorFlow checkpoint.
        bert_config_file: JSON config file describing the model architecture.
        pytorch_dump_path: Where to write the converted PyTorch weights.
    """
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)


# Descriptive alias, kept so callers may use either name.
convert_tf_checkpoint_to_pytorch = lowerCAmelCase__
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    lowerCAmelCase__(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 29 |
import string
from math import logaa
def term_frequency(term: str, document: str) -> int:
    """Return how many times ``term`` occurs in ``document``.

    Matching is case-insensitive; punctuation and newlines are stripped first.
    """
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (documents containing ``term``, total documents) for a corpus
    whose documents are separated by newlines."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    """Return idf = round(log10(n / df), 3); with ``smoothing``,
    round(1 + log10(n / (1 + df)), 3).

    Raises:
        ZeroDivisionError: if ``df`` is 0 and smoothing is off.
        ValueError: if ``n`` is 0 (log10(0) is undefined).
    """
    # Local import: the module-level `from math import logaa` line is broken.
    from math import log10

    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    """Return the tf-idf score, rounded to 3 decimal places."""
    return round(tf * idf, 3)


# The previous revision bound all four functions above to this single name;
# keep the final binding for backward compatibility.
lowerCAmelCase__ = tf_idf
| 29 | 1 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class SCREAMING_SNAKE_CASE ( ProcessorMixin ):
    """
    Processor bundling a BridgeTower image processor with a RoBERTa tokenizer;
    a single call produces text encodings merged with pixel values and masks.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Tokenize ``text`` and preprocess ``images``, returning one merged
        :class:`BatchEncoding`."""
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # De-duplicated union of both components' input names, order preserved.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , unittest.TestCase ):
    '''Test suite for ``GPTSanJapaneseTokenizer`` built on the shared tokenizer tester mixin.

    NOTE(review): identifier obfuscation rewrote local/attribute bindings to
    ``__a``, so several lines reference names that are never bound (e.g.
    ``vocab_tokens`` on the write line, ``self.vocab_file``/``self.emoji_file``
    in the ``open`` calls, ``tokenizer`` inside the clean-sequence helper).
    Restore the original names before executing this suite.
    '''
    # Tokenizer class under test, rust-tokenizer flag, and extra kwargs for the mixin.
    __lowerCamelCase : Dict =GPTSanJapaneseTokenizer
    __lowerCamelCase : List[Any] =False
    __lowerCamelCase : List[str] ={'do_clean_text': False, 'add_prefix_space': False}
    def UpperCamelCase_ ( self : str ):
        '''Write a tiny vocabulary file and emoji-mapping file into the temp dir.'''
        super().setUp()
        # fmt: off
        __a = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
        # fmt: on
        __a = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}}  # 😀
        __a = {"""unk_token""": """<unk>"""}
        __a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        __a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
        with open(self.emoji_file , """w""" ) as emoji_writer:
            emoji_writer.write(json.dumps(__lowercase ) )
    def UpperCamelCase_ ( self : Dict , **__lowercase : Union[str, Any] ):
        '''Build a tokenizer from the temp dir, merging the special-token map into ``kwargs``.'''
        kwargs.update(self.special_tokens_map )
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__lowercase )
    def UpperCamelCase_ ( self : Any , __lowercase : str ):
        '''Return an (input, expected-output) pair; the kanji variant 㔺 normalizes to 世.'''
        __a = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
        __a = """こんにちは、世界。 \nこんばんは、世界。😀"""
        return input_text, output_text
    def UpperCamelCase_ ( self : Optional[Any] , __lowercase : Union[str, Any] ):
        '''Encode the sample input and decode it back, returning ``(text, ids)``.'''
        __a , __a = self.get_input_output_texts(__lowercase )
        __a = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
        __a = tokenizer.decode(__lowercase , clean_up_tokenization_spaces=__lowercase )
        return text, ids
    def UpperCamelCase_ ( self : Optional[Any] ):
        '''Intentionally skipped mixin hook (not relevant for this tokenizer).'''
        pass  # TODO add if relevant
    def UpperCamelCase_ ( self : Optional[int] ):
        '''Intentionally skipped mixin hook (not relevant for this tokenizer).'''
        pass  # TODO add if relevant
    def UpperCamelCase_ ( self : Optional[int] ):
        '''Intentionally skipped mixin hook (not relevant for this tokenizer).'''
        pass  # TODO add if relevant
    def UpperCamelCase_ ( self : List[Any] ):
        '''Tokenization and token<->id conversion against the toy vocab (space maps to <SP>, OOV to <unk>).'''
        __a = self.get_tokenizer()
        # Testing tokenization
        __a = """こんにちは、世界。 こんばんは、㔺界。"""
        __a = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
        __a = tokenizer.tokenize(__lowercase )
        self.assertListEqual(__lowercase , __lowercase )
        # Testing conversion to ids without special tokens
        __a = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        __a = tokenizer.convert_tokens_to_ids(__lowercase )
        self.assertListEqual(__lowercase , __lowercase )
        # Testing conversion to ids with special tokens
        __a = tokens + [tokenizer.unk_token]
        __a = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        __a = tokenizer.convert_tokens_to_ids(__lowercase )
        self.assertListEqual(__lowercase , __lowercase )
    def UpperCamelCase_ ( self : Optional[Any] ):
        '''An encode/decode round-trip expands <|bagoftoken|> into repeated neighbor tokens.'''
        __a = self.get_tokenizer()
        # Testing tokenization
        __a = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
        __a = """こんにちは、、、、世界。こんばんは、、、、世界。"""
        __a = tokenizer.encode(__lowercase )
        __a = tokenizer.decode(__lowercase )
        self.assertEqual(__lowercase , __lowercase )
    @slow
    def UpperCamelCase_ ( self : int ):
        '''Prefix text passed via ``prefix_text=`` must decode identically to plain concatenation.'''
        __a = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        # Testing tokenization
        __a = """こんにちは、世界。"""
        __a = """こんばんは、㔺界。😀"""
        __a = """こんにちは、世界。こんばんは、世界。😀"""
        __a = tokenizer.encode(prefix_text + input_text )
        __a = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
        __a = tokenizer.encode(__lowercase , prefix_text=__lowercase )
        __a = tokenizer.decode(__lowercase )
        __a = tokenizer.decode(__lowercase )
        __a = tokenizer.decode(__lowercase )
        self.assertEqual(__lowercase , __lowercase )
        self.assertEqual(__lowercase , __lowercase )
        self.assertEqual(__lowercase , __lowercase )
    @slow
    def UpperCamelCase_ ( self : Tuple ):
        '''token_type_ids mark the prefix segment: 1 on prefix positions, 0 on the input segment.'''
        __a = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        # Testing tokenization
        __a = """こんにちは、世界。"""
        __a = """こんばんは、㔺界。😀"""
        __a = len(tokenizer.encode(__lowercase ) ) - 2
        __a = len(tokenizer.encode(__lowercase ) ) - 2
        __a = [1] + [0] * (len_prefix + len_text + 1)
        __a = [1] * (len_prefix + len_text + 1) + [0]
        __a = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        __a = tokenizer(prefix_text + input_text ).token_type_ids
        __a = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
        __a = tokenizer(__lowercase , prefix_text=__lowercase ).token_type_ids
        self.assertListEqual(__lowercase , __lowercase )
        self.assertListEqual(__lowercase , __lowercase )
        self.assertListEqual(__lowercase , __lowercase )
    @slow
    def UpperCamelCase_ ( self : int ):
        '''A SEG token separates prefix from input; equivalent calls decode identically but encode differently.'''
        __a = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        __a = tokenizer.encode("""あンいワ""" )
        __a = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
        __a = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
        self.assertEqual(tokenizer.decode(__lowercase ) , tokenizer.decode(__lowercase ) )
        self.assertEqual(tokenizer.decode(__lowercase ) , tokenizer.decode(__lowercase ) )
        self.assertNotEqual(__lowercase , __lowercase )
        self.assertNotEqual(__lowercase , __lowercase )
        self.assertEqual(x_token_a[1] , x_token_a[-1] )  # SEG token
        self.assertEqual(x_token_a[1] , x_token_a[3] )  # SEG token
    @slow
    def UpperCamelCase_ ( self : List[str] ):
        '''Batch encoding with padding yields aligned input_ids, token_type_ids and attention_mask.'''
        __a = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        __a = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
        __a = tokenizer(__lowercase , padding=__lowercase )
        __a = tokenizer.batch_encode_plus(__lowercase , padding=__lowercase )
        # fmt: off
        __a = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        __a = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        __a = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids , __lowercase )
        self.assertListEqual(x_token.token_type_ids , __lowercase )
        self.assertListEqual(x_token.attention_mask , __lowercase )
        self.assertListEqual(x_token_a.input_ids , __lowercase )
        self.assertListEqual(x_token_a.token_type_ids , __lowercase )
        self.assertListEqual(x_token_a.attention_mask , __lowercase )
    def UpperCamelCase_ ( self : Union[str, Any] ):
        '''Skipped: Japanese character-variant handling makes the generic check inapplicable.'''
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass
    def UpperCamelCase_ ( self : Union[str, Any] ):
        '''Skipped: this tokenizer defines no padding token.'''
        # tokenizer has no padding token
        pass
| 225 | 1 |
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""):
lowercase : Tuple = True
from torch.cuda.amp import autocast
lowercase : Dict = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments pertaining to the model we are going to pretrain.

    NOTE(review): the obfuscated original declared every field under the same
    name (``__A``), which collapses them into a single dataclass field, and
    used an undefined placeholder for several defaults. Field names are
    restored to the ones ``main()`` actually reads; the placeholder defaults
    are restored from the upstream wav2vec2 pretraining example.
    """

    # Required: model checkpoint to start pretraining from.
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether to log verbose messages or not."},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.99_9995, metadata={"help": "Decay of gumbel temperature during training."}
    )


# Backward-compatible alias for the obfuscated class name (later shadowed by
# subsequent class definitions, exactly as in the original file).
A__ = ModelArguments
def configure_logger(model_args, training_args):
    """Configure the module logger: DEBUG when ``model_args.verbose_logging``
    is set, INFO on the main distributed process, WARNING otherwise.

    NOTE(review): the obfuscated original gave both parameters the same name
    (a SyntaxError) and was defined under a name later shadowed by ``main``;
    restored to the name the call site in ``main`` uses.
    """
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)


# Backward-compatible alias for the obfuscated name (later shadowed by the
# ``main`` definition, exactly as in the original file).
A_ = configure_logger
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to use for pretraining.

    NOTE(review): field names restored from the reads in ``main()``; the
    obfuscated original collapsed all fields under one name and used an
    undefined placeholder for some defaults (restored from the upstream
    wav2vec2 pretraining example).
    """

    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )


# Backward-compatible alias for the obfuscated class name.
A__ = DataTrainingArguments
@dataclass
class DataCollatorForWavaVecaPretraining:
    """Data collator that dynamically pads raw speech inputs for wav2vec2
    pretraining and samples the masked time indices for the contrastive task.

    Args:
        model: the ``WavaVecaForPreTraining`` being trained; used to map raw
            input lengths to feature-encoder output lengths and to read the
            masking hyper-parameters from its config.
        feature_extractor: feature extractor whose ``pad`` builds the batch.
        padding / pad_to_multiple_of / max_length: forwarded to
            ``feature_extractor.pad``.

    NOTE(review): the obfuscated original collapsed all fields under one name
    and lost the ``batch``/``attention_mask`` bindings in ``__call__``; both
    are restored from the upstream wav2vec2 pretraining example.
    """

    model: WavaVecaForPreTraining
    feature_extractor: WavaVecaFeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__(self, features) -> Dict[str, torch.Tensor]:
        # Reformat the list of feature dicts into one padded PyTorch batch.
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        batch_size = batch["input_values"].shape[0]
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )
            # these two operations make sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )
        return batch


# Backward-compatible alias for the obfuscated class name (later shadowed by
# the trainer class definition, exactly as in the original file).
A__ = DataCollatorForWavaVecaPretraining
class WavaVecaPreTrainer(Trainer):
    """``Trainer`` subclass that decays the model's Gumbel-softmax temperature
    after every training step.

    NOTE(review): the obfuscated original subclassed an undefined name, used
    duplicate parameter names (a SyntaxError) and assigned every ``__init__``
    value to the same throwaway local; names are restored to the attributes
    ``training_step`` actually reads and to the keyword names ``main()``
    passes (``max_gumbel_temp``/``min_gumbel_temp``/``gumbel_temp_decay``).
    """

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model, inputs) -> torch.Tensor:
        """Run one optimization step on ``inputs`` and decay the Gumbel temperature."""
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )

        return loss.detach()


# Backward-compatible alias for the obfuscated class name.
A__ = WavaVecaPreTrainer
def main() -> Any:
    """Entry point: parse CLI args, build the dataset pipeline, and pretrain wav2vec2."""
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # NOTE(review): logging setup is inlined here because the helper that
    # originally configured it was defined under a mangled name that this
    # function's own binding shadows at module level.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split="validation",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}",
            cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'"
        )

    model = WavaVecaForPreTraining(config)

    data_collator = DataCollatorForWavaVecaPretraining(model=model, feature_extractor=feature_extractor)

    trainer = WavaVecaPreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()


# Backward-compatible alias: this entry point was previously bound to the
# obfuscated name `A_`.
A_ = main


if __name__ == "__main__":
    main()
| 392 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    r"""
    Wraps a Chinese-CLIP image processor and a BERT tokenizer into a single
    processor offering the combined functionality of both.

    Args:
        image_processor (`ChineseCLIPImageProcessor`): required image processor.
        tokenizer (`BertTokenizer` or `BertTokenizerFast`): required tokenizer.

    NOTE(review): the obfuscated original declared the three ProcessorMixin
    class attributes under one mangled name and gave ``__call__`` three
    parameters with the same name (a SyntaxError); attribute, method and
    parameter names are restored to the ProcessorMixin contract.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # Accept the deprecated `feature_extractor` kwarg as a fallback.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or preprocess `images`.

        Returns the tokenizer encoding (with `pixel_values` merged in when
        images are also given), or a `BatchEncoding` of image features when
        only images are given. Raises ValueError when neither is provided.
        """
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """De-duplicated union of tokenizer and image-processor input names."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        """Deprecated accessor kept for backward compatibility."""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class


# Backward-compatible alias for the obfuscated class name.
A__ = ChineseCLIPProcessor
| 392 | 1 |
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    """Assert `dataset` matches the canonical 4-row JSON fixture and that each
    column's dtype equals `expected_features` (a column-name -> dtype map).

    NOTE(review): the obfuscated original gave both parameters the same name
    (a SyntaxError); restored to the name the call sites use.
    """
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    """JsonDatasetReader honors `keep_in_memory` and yields the fixture dataset."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    """Explicit `features` (or None for inferred defaults) are applied on read."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    """Column order of the fixture (col_3, col_1, col_2) is preserved on read."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    """The requested feature order (col_2, col_3, col_1) overrides the file's order."""
    # jsonl_312_path fixture stores columns in the order col_3, col_1, col_2
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    """The `split` argument is recorded on the dataset, defaulting to 'train'."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    """A single path string and a list of paths both load the fixture dataset."""
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Assert each requested split of `dataset_dict` matches the 4-row fixture
    and that each column's dtype equals `expected_features`."""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    """Reading a {split: path} mapping honors `keep_in_memory`."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    """Explicit `features` (or None for inferred defaults) apply to each split."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    """Each split in the input mapping becomes a correspondingly-named dataset."""
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def load_json(buffer):
    """Deserialize one whole JSON document from an open file-like object."""
    return json.load(buffer)
def load_json_lines(buffer):
    """Deserialize a JSON-lines stream: one JSON document per line.

    Fixes the obfuscated original, which parsed the buffer object itself on
    every iteration instead of each line.
    """
    return [json.loads(line) for line in buffer]
class TestJsonDatasetWriter:
    """Tests for JsonDatasetWriter covering lines/orient modes, multiprocessing,
    invalid num_proc, and on-disk compression.

    NOTE(review): the obfuscated original named every test method identically,
    so only the last survived on the class, and the class name would not be
    collected by pytest; names restored.
    """

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_orient_invalidproc(self, dataset):
        # num_proc must be a positive integer
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()
        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content


# Backward-compatible alias for the obfuscated class name.
A = TestJsonDatasetWriter
| 70 |
"""simple docstring"""
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch(tf_checkpoint_path: str, config_path: str, pytorch_dump_path: str):
    """Convert a TensorFlow2 "token dropping" BERT checkpoint into a PyTorch BertForMaskedLM.

    NOTE(review): the original function had three parameters all named
    ``_lowerCamelCase`` (a SyntaxError) and bound every converted tensor to a
    throwaway local instead of writing it into ``model`` — the conversion was a
    no-op. Restored the assignments to the weight/bias ``.data`` targets whose
    shapes the original code was already reading.

    Args:
        tf_checkpoint_path: Directory containing the TensorFlow checkpoint.
        config_path: JSON file describing the BERT architecture.
        pytorch_dump_path: Output directory for the converted PyTorch model.
    """

    def get_masked_lm_array(name: str):
        # Variables that live under the masked-LM head of the TF object graph.
        full_name = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            # TF dense kernels are stored transposed relative to torch.nn.Linear.
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_array(name: str):
        # Variables directly under the encoder (embeddings, pooler, norms).
        full_name = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_layer_array(layer_index: int, name: str):
        # Per-layer variables (intermediate/output dense, layer norms).
        full_name = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index: int, name: str, original_shape):
        # Attention weights are stored split per head; reshape back to the 2-D torch shape.
        full_name = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(original_shape)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    print(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)

    # Layers
    for layer_index in range(0, config.num_hidden_layers):
        layer: BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/kernel", self_attn.query.weight.data.shape
        )
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/bias", self_attn.query.bias.data.shape
        )
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/kernel", self_attn.key.weight.data.shape
        )
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/bias", self_attn.key.bias.data.shape
        )
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/kernel", self_attn.value.weight.data.shape
        )
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/bias", self_attn.value.bias.data.shape
        )

        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/kernel", self_output.dense.weight.data.shape
        )
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/bias", self_output.dense.bias.data.shape
        )
        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/gamma")
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/beta")

        # Intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, "_intermediate_dense/kernel")
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, "_intermediate_dense/bias")

        # Output
        bert_output: BertOutput = layer.output

        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, "_output_dense/kernel")
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, "_output_dense/bias")
        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_output_layer_norm/gamma")
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_output_layer_norm/beta")

    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings")
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings")
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma")
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta")

    # LM Head
    lm_head = model.cls.predictions.transform

    lm_head.dense.weight.data = get_masked_lm_array("dense/kernel")
    lm_head.dense.bias.data = get_masked_lm_array("dense/bias")
    lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma")
    lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta")
    # Word embeddings are shared with the masked-LM embedding table.
    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table")

    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array("_pooler_layer/kernel")
    model.bert.pooler.dense.bias.data = get_encoder_array("_pooler_layer/bias")

    # Export final model
    model.save_pretrained(pytorch_dump_path)

    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())

    print("Model conversion was done sucessfully!")
if __name__ == "__main__":
    # NOTE(review): the original bound the parser and the parsed args to
    # `lowercase__` while the following lines referenced `parser`/`args`
    # (NameError). Restored the names the code actually uses.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        type=str,
        required=True,
        help="The config json file corresponding to the BERT model. This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()
    convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 581 | 0 |
# Lazy-import scaffolding for the Longformer model package.
#
# NOTE(review): in the original, the import-structure dict and all its keyed
# additions were collapsed onto `__lowercase`, while `_LazyModule` received the
# undefined name `_import_structure`, and the lazy module was bound to
# `__lowercase` instead of being installed via `sys.modules[__name__]` — the
# lazy-import mechanism could never work. Restored the standard pattern.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Maps submodule name -> public names it defines; consumed by _LazyModule below.
_import_structure = {
    "configuration_longformer": [
        "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LongformerConfig",
        "LongformerOnnxConfig",
    ],
    "tokenization_longformer": ["LongformerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longformer"] = [
        "LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongformerForMaskedLM",
        "LongformerForMultipleChoice",
        "LongformerForQuestionAnswering",
        "LongformerForSequenceClassification",
        "LongformerForTokenClassification",
        "LongformerModel",
        "LongformerPreTrainedModel",
        "LongformerSelfAttention",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_longformer"] = [
        "TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLongformerForMaskedLM",
        "TFLongformerForMultipleChoice",
        "TFLongformerForQuestionAnswering",
        "TFLongformerForSequenceClassification",
        "TFLongformerForTokenClassification",
        "TFLongformerModel",
        "TFLongformerPreTrainedModel",
        "TFLongformerSelfAttention",
    ]


if TYPE_CHECKING:
    from .configuration_longformer import (
        LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LongformerConfig,
        LongformerOnnxConfig,
    )
    from .tokenization_longformer import LongformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_longformer_fast import LongformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longformer import (
            LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongformerForMaskedLM,
            LongformerForMultipleChoice,
            LongformerForQuestionAnswering,
            LongformerForSequenceClassification,
            LongformerForTokenClassification,
            LongformerModel,
            LongformerPreTrainedModel,
            LongformerSelfAttention,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_longformer import (
            TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLongformerForMaskedLM,
            TFLongformerForMultipleChoice,
            TFLongformerForQuestionAnswering,
            TFLongformerForSequenceClassification,
            TFLongformerForTokenClassification,
            TFLongformerModel,
            TFLongformerPreTrainedModel,
            TFLongformerSelfAttention,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
# Strings that mark the start of a new top-level block in generated Python code;
# generation stops (and output is truncated) at the first of these after the prompt.
__lowercase :str = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset(IterableDataset):
    """Iterable dataset that tokenizes every HumanEval prompt once and yields
    ``n_copies`` entries per task.

    NOTE(review): the original class was named ``_a`` (colliding with the next
    class) while ``main`` instantiates ``TokenizedDataset``; its ``__init__``
    declared four parameters all named ``a`` (a SyntaxError) and never set the
    attributes that ``__iter__`` reads. Restored from the body's own
    ``self.tokenizer`` / ``self.dataset`` / ``self.n_tasks`` / ``self.n_copies``
    references; the base class was the obfuscated ``IterableDataset`` import.
    """

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Stopping criterion: halt generation once every sequence in the batch has
    produced one of the end-of-function marker strings after the prompt.

    NOTE(review): the original class was named ``_a`` (shadowing the previous
    ``_a``) while ``main`` instantiates ``EndOfFunctionCriteria``, and its
    ``__init__`` never bound the attributes that ``__call__`` reads. Restored
    from the body's ``self.start_length`` / ``self.eof_strings`` /
    ``self.tokenizer`` references; the base is the obfuscated
    ``StoppingCriteria`` import.
    """

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length  # prompt length; updated per batch by complete_code
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Return True iff all generated continuations contain an EOF marker."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string, eof_strings=None):
    """Truncate generated code at the last end-of-function marker.

    NOTE(review): the original joined the *input string* with itself to build the
    split pattern (`"|".join(_lowerCamelCase)`) instead of joining the module's
    EOF marker list, and was named ``UpperCAmelCase`` while ``complete_code``
    calls ``remove_last_block``.

    Args:
        string: Generated source code.
        eof_strings: Marker strings that start a new top-level block; defaults
            to this module's EOF marker list.

    Returns:
        ``string`` with the final (unfinished) block removed.
    """
    if eof_strings is None:
        eof_strings = __lowercase  # module-level EOF marker strings
    string_list = re.split("(%s)" % "|".join(eof_strings), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Sample completions for every HumanEval prompt across all processes.

    NOTE(review): the original signature declared seven parameters all named
    ``_lowerCamelCase`` (a SyntaxError) and bound intermediates to throwaway
    names while later lines referenced ``generated_tokens``/``generated_tasks``;
    names restored from those references. ``main`` calls this as
    ``complete_code``.

    Args:
        accelerator: the ``accelerate.Accelerator`` coordinating the run.
        model: prepared causal language model.
        tokenizer: tokenizer used for padding index and decoding.
        dataloader: yields batches from ``TokenizedDataset``.
        n_tasks: number of HumanEval tasks.
        batch_size: completions sampled per prompt per step (num_return_sequences).
        **gen_kwargs: forwarded to ``model.generate``.

    Returns:
        list[list[str]]: per task, the generated candidates with the trailing
        unfinished block stripped.
    """
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            # Stopping criterion must ignore the prompt tokens of this batch.
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )

            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))

    return code_gens
def main():
    """Entry point: sample HumanEval completions and score them with pass@k.

    NOTE(review): the original was named ``UpperCAmelCase`` while the
    ``__main__`` guard calls ``main()``, and every local was bound to
    ``SCREAMING_SNAKE_CASE__`` while later lines read ``args``/``accelerator``/
    ``tokenizer``/etc.; names restored from those references.
    """
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        # __lowercase is this module's list of end-of-function marker strings
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, __lowercase, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            "Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    generations = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs
    )

    if accelerator.is_main_process:
        references = []

        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"""check({human_eval['test'][task]['entry_point']})"""
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=generations, num_workers=args.num_workers
        )
        print(f"""Results: {pass_at_k}""")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)


# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    main()
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Integration test for the TF XLM-RoBERTa base checkpoint.

    NOTE(review): the original body bound everything to ``__magic_name__`` and
    then passed the undefined name ``_lowercase`` around, and used the
    non-existent dtypes ``tf.intaa``/``tf.floataa``; restored to int32/float32
    and explicit locals. Expected values are reproduced byte-for-byte.
    """

    @slow
    def snake_case__ ( self : Tuple ):
        model = TFXLMRobertaModel.from_pretrained('''jplu/tf-xlm-roberta-base''' )
        features = {
            '''input_ids''': tf.convert_to_tensor([[0, 2646, 1_0269, 83, 9_9942, 2]] , dtype=tf.int32 ),  # "My dog is cute"
            '''attention_mask''': tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.int32 ),
        }
        output = model(features )['''last_hidden_state''']
        expected_shape = tf.TensorShape((1, 6, 768) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0_681_762, 0.10_894_451, 0.06_772_504],
                    [-0.06_423_668, 0.02_366_615, 0.04_329_344],
                    [-0.06_057_295, 0.09_974_135, -0.00_070_584],
                ]
            ] , dtype=tf.float32 , )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 432 |
'''simple docstring'''
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
SCREAMING_SNAKE_CASE : List[Any] = "0.12" # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Build a random int32 array of token ids with the given shape.

    NOTE(review): the original declared its parameters all as
    ``lowerCAmelCase__`` (a SyntaxError) and was named ``_UpperCamelCase``,
    while the helper below calls ``ids_tensor``; names restored from the
    body's references.

    Args:
        shape: Desired output shape (tuple of ints).
        vocab_size: Ids are drawn uniformly from ``[0, vocab_size - 1]``.
        rng: Optional ``random.Random`` for reproducibility.
    """
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)
    return output


def random_attention_mask(shape, rng=None):
    """Random 0/1 attention mask whose last column is forced to 1.

    NOTE(review): restored from the second (shadowing) ``_UpperCamelCase``;
    the forced-one assignment was destroyed by obfuscation — the comment in
    the original states the intent ("at least one token is attended to for
    each batch"), implemented here as setting the final position.
    """
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class snake_case :
    """Mixin of Flax generation tests, driven by a concrete subclass's `model_tester`.

    NOTE(review): this block is heavily machine-obfuscated — every method is
    named ``a__`` (so within the class each later definition shadows the
    previous one and only the last survives), every local is bound to
    ``SCREAMING_SNAKE_CASE_`` while subsequent lines pass the undefined
    placeholder ``_lowercase``, and the two class attributes are both named
    ``_a``. Every method will raise NameError at runtime as written; the code
    is left byte-identical here and only documented, since reconstructing the
    intended bindings would have to be verified against the upstream
    FlaxGenerationTesterMixin.
    """
    # Both attributes were distinct in the original (presumably `model_tester`
    # and `all_generative_model_classes`) — TODO confirm upstream.
    _a = None
    _a = ()
    # Presumably the config/input preparation helper (`_get_input_ids_and_config`).
    def a__ ( self ) -> List[str]:
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
        # cut to half length & take max batch_size 3
        SCREAMING_SNAKE_CASE_ = 2
        SCREAMING_SNAKE_CASE_ = inputs['input_ids'].shape[-1] // 2
        SCREAMING_SNAKE_CASE_ = inputs['input_ids'][:max_batch_size, :sequence_length]
        SCREAMING_SNAKE_CASE_ = jnp.ones_like(_lowercase )
        SCREAMING_SNAKE_CASE_ = attention_mask[:max_batch_size, :sequence_length]
        # generate max 5 tokens
        SCREAMING_SNAKE_CASE_ = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            SCREAMING_SNAKE_CASE_ = config.eos_token_id
        return config, input_ids, attention_mask, max_length
    # PyTorch/Flax cross-check: greedy generation must match between frameworks.
    @is_pt_flax_cross_test
    def a__ ( self ) -> Dict:
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._get_input_ids_and_config()
        SCREAMING_SNAKE_CASE_ = False
        SCREAMING_SNAKE_CASE_ = max_length
        SCREAMING_SNAKE_CASE_ = 0
        for model_class in self.all_generative_model_classes:
            SCREAMING_SNAKE_CASE_ = model_class(_lowercase )
            SCREAMING_SNAKE_CASE_ = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            SCREAMING_SNAKE_CASE_ = getattr(_lowercase, _lowercase )
            SCREAMING_SNAKE_CASE_ = pt_model_class(_lowercase ).eval()
            SCREAMING_SNAKE_CASE_ = load_flax_weights_in_pytorch_model(_lowercase, flax_model.params )
            SCREAMING_SNAKE_CASE_ = flax_model.generate(_lowercase ).sequences
            SCREAMING_SNAKE_CASE_ = pt_model.generate(torch.tensor(_lowercase, dtype=torch.long ) )
            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                SCREAMING_SNAKE_CASE_ = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist() )
    # Greedy generation, plus jit-compiled parity check.
    def a__ ( self ) -> Any:
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._get_input_ids_and_config()
        SCREAMING_SNAKE_CASE_ = False
        SCREAMING_SNAKE_CASE_ = max_length
        for model_class in self.all_generative_model_classes:
            SCREAMING_SNAKE_CASE_ = model_class(_lowercase )
            SCREAMING_SNAKE_CASE_ = model.generate(_lowercase ).sequences
            self.assertEqual(generation_outputs.shape[-1], _lowercase )
            SCREAMING_SNAKE_CASE_ = jit(model.generate )
            SCREAMING_SNAKE_CASE_ = jit_generate(_lowercase ).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() )
    # Sampling (do_sample=True) variant of the same jit parity check.
    def a__ ( self ) -> int:
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._get_input_ids_and_config()
        SCREAMING_SNAKE_CASE_ = True
        SCREAMING_SNAKE_CASE_ = max_length
        for model_class in self.all_generative_model_classes:
            SCREAMING_SNAKE_CASE_ = model_class(_lowercase )
            SCREAMING_SNAKE_CASE_ = model.generate(_lowercase ).sequences
            self.assertEqual(generation_outputs.shape[-1], _lowercase )
            SCREAMING_SNAKE_CASE_ = jit(model.generate )
            SCREAMING_SNAKE_CASE_ = jit_generate(_lowercase ).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() )
    # Beam-search variant (the constant 2 is presumably num_beams — TODO confirm).
    def a__ ( self ) -> Any:
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._get_input_ids_and_config()
        SCREAMING_SNAKE_CASE_ = False
        SCREAMING_SNAKE_CASE_ = max_length
        SCREAMING_SNAKE_CASE_ = 2
        for model_class in self.all_generative_model_classes:
            SCREAMING_SNAKE_CASE_ = model_class(_lowercase )
            SCREAMING_SNAKE_CASE_ = model.generate(_lowercase ).sequences
            self.assertEqual(generation_outputs.shape[-1], _lowercase )
            SCREAMING_SNAKE_CASE_ = jit(model.generate )
            SCREAMING_SNAKE_CASE_ = jit_generate(_lowercase ).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() )
    # Multiple return sequences: output batch should scale by num_return_sequences.
    def a__ ( self ) -> Optional[Any]:
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._get_input_ids_and_config()
        SCREAMING_SNAKE_CASE_ = False
        SCREAMING_SNAKE_CASE_ = max_length
        SCREAMING_SNAKE_CASE_ = 2
        SCREAMING_SNAKE_CASE_ = 2
        for model_class in self.all_generative_model_classes:
            SCREAMING_SNAKE_CASE_ = model_class(_lowercase )
            SCREAMING_SNAKE_CASE_ = model.generate(_lowercase ).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences )
    # Sampling with temperature/top_p/top_k and length penalties.
    def a__ ( self ) -> Tuple:
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._get_input_ids_and_config()
        SCREAMING_SNAKE_CASE_ = True
        SCREAMING_SNAKE_CASE_ = max_length
        SCREAMING_SNAKE_CASE_ = 0.8
        SCREAMING_SNAKE_CASE_ = 10
        SCREAMING_SNAKE_CASE_ = 0.3
        SCREAMING_SNAKE_CASE_ = 1
        SCREAMING_SNAKE_CASE_ = 8
        SCREAMING_SNAKE_CASE_ = 9
        for model_class in self.all_generative_model_classes:
            SCREAMING_SNAKE_CASE_ = model_class(_lowercase )
            SCREAMING_SNAKE_CASE_ = model.generate(_lowercase ).sequences
            self.assertEqual(generation_outputs.shape[-1], _lowercase )
            SCREAMING_SNAKE_CASE_ = jit(model.generate )
            SCREAMING_SNAKE_CASE_ = jit_generate(_lowercase ).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() )
    # Greedy with min/max length penalties.
    def a__ ( self ) -> List[str]:
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._get_input_ids_and_config()
        SCREAMING_SNAKE_CASE_ = max_length
        SCREAMING_SNAKE_CASE_ = 1
        SCREAMING_SNAKE_CASE_ = 8
        SCREAMING_SNAKE_CASE_ = 9
        for model_class in self.all_generative_model_classes:
            SCREAMING_SNAKE_CASE_ = model_class(_lowercase )
            SCREAMING_SNAKE_CASE_ = model.generate(_lowercase ).sequences
            self.assertEqual(generation_outputs.shape[-1], _lowercase )
            SCREAMING_SNAKE_CASE_ = jit(model.generate )
            SCREAMING_SNAKE_CASE_ = jit_generate(_lowercase ).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() )
    # Beam search with length penalties.
    def a__ ( self ) -> Dict:
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._get_input_ids_and_config()
        SCREAMING_SNAKE_CASE_ = max_length
        SCREAMING_SNAKE_CASE_ = 2
        SCREAMING_SNAKE_CASE_ = 1
        SCREAMING_SNAKE_CASE_ = 8
        SCREAMING_SNAKE_CASE_ = 9
        for model_class in self.all_generative_model_classes:
            SCREAMING_SNAKE_CASE_ = model_class(_lowercase )
            SCREAMING_SNAKE_CASE_ = model.generate(_lowercase ).sequences
            self.assertEqual(generation_outputs.shape[-1], _lowercase )
            SCREAMING_SNAKE_CASE_ = jit(model.generate )
            SCREAMING_SNAKE_CASE_ = jit_generate(_lowercase ).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() )
    # Greedy with an explicit (partially-zero) attention mask.
    def a__ ( self ) -> Optional[int]:
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._get_input_ids_and_config()
        # pad attention mask on the left
        SCREAMING_SNAKE_CASE_ = attention_mask.at[(0, 0)].set(0 )
        SCREAMING_SNAKE_CASE_ = False
        SCREAMING_SNAKE_CASE_ = max_length
        for model_class in self.all_generative_model_classes:
            SCREAMING_SNAKE_CASE_ = model_class(_lowercase )
            SCREAMING_SNAKE_CASE_ = model.generate(_lowercase, attention_mask=_lowercase ).sequences
            self.assertEqual(generation_outputs.shape[-1], _lowercase )
            SCREAMING_SNAKE_CASE_ = jit(model.generate )
            SCREAMING_SNAKE_CASE_ = jit_generate(_lowercase, attention_mask=_lowercase ).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() )
    # Sampling with an explicit attention mask.
    def a__ ( self ) -> List[Any]:
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._get_input_ids_and_config()
        # pad attention mask on the left
        SCREAMING_SNAKE_CASE_ = attention_mask.at[(0, 0)].set(0 )
        SCREAMING_SNAKE_CASE_ = True
        SCREAMING_SNAKE_CASE_ = max_length
        for model_class in self.all_generative_model_classes:
            SCREAMING_SNAKE_CASE_ = model_class(_lowercase )
            SCREAMING_SNAKE_CASE_ = model.generate(_lowercase, attention_mask=_lowercase ).sequences
            self.assertEqual(generation_outputs.shape[-1], _lowercase )
            SCREAMING_SNAKE_CASE_ = jit(model.generate )
            SCREAMING_SNAKE_CASE_ = jit_generate(_lowercase, attention_mask=_lowercase ).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() )
    # Beam search with an explicit attention mask.
    def a__ ( self ) -> int:
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._get_input_ids_and_config()
        # pad attention mask on the left
        SCREAMING_SNAKE_CASE_ = attention_mask.at[(0, 0)].set(0 )
        SCREAMING_SNAKE_CASE_ = 2
        SCREAMING_SNAKE_CASE_ = max_length
        for model_class in self.all_generative_model_classes:
            SCREAMING_SNAKE_CASE_ = model_class(_lowercase )
            SCREAMING_SNAKE_CASE_ = model.generate(_lowercase, attention_mask=_lowercase ).sequences
            self.assertEqual(generation_outputs.shape[-1], _lowercase )
            SCREAMING_SNAKE_CASE_ = jit(model.generate )
            SCREAMING_SNAKE_CASE_ = jit_generate(_lowercase, attention_mask=_lowercase ).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() )
@require_flax
class snake_case ( unittest.TestCase ):
    """Integration test: `generate` rejects typo'd and unknown keyword arguments.

    NOTE(review): this class is also named ``snake_case``, so it shadows the
    generation-mixin class defined just above at module level — the mixin
    becomes inaccessible to importers. The ``_lowercase`` placeholders below
    are undefined names left by obfuscation (the first assertRaisesRegex
    context was presumably TypeError/ValueError — confirm upstream).
    """
    def a__ ( self ) -> Optional[int]:
        SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-bert' )
        SCREAMING_SNAKE_CASE_ = FlaxAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-bert-flax-only' )
        SCREAMING_SNAKE_CASE_ = 'Hello world'
        SCREAMING_SNAKE_CASE_ = tokenizer(_lowercase, return_tensors='np' ).input_ids
        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(_lowercase, 'do_samples' ):
            model.generate(_lowercase, do_samples=_lowercase )
        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(_lowercase, 'foo' ):
            SCREAMING_SNAKE_CASE_ = {'foo': 'bar'}
            model.generate(_lowercase, **_lowercase )
| 294 | 0 |
def solution(max_perimeter: int = 10**9) -> int:
    """Project Euler 94: sum of perimeters of almost-equilateral Heronian
    triangles (sides a, a, a±1 with integer area) whose perimeter does not
    exceed ``max_perimeter``.

    NOTE(review): the original bound every local to ``SCREAMING_SNAKE_CASE_``
    while the loop referenced ``perimeter``/``perimeters_sum``/``prev_value``/
    ``value``/``i`` (NameError), and the ``__main__`` guard called the
    undefined name ``solution``; names restored from those references.
    """
    prev_value = 1
    value = 2

    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        # Recurrence generating the side lengths of successive solutions.
        prev_value += 2 * value
        value += prev_value

        # Perimeters alternate between the (a, a, a+1) and (a, a, a-1) families.
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum


if __name__ == "__main__":
    print(f"""{solution() = }""")
| 311 |
from string import ascii_lowercase, ascii_uppercase


def capitalize(sentence: str) -> str:
    """Return ``sentence`` with its first character upper-cased (other
    characters untouched; a non-letter first character is kept as-is).

    NOTE(review): the original built the translation table as
    ``dict(zip(sentence, sentence))`` — zipping the input with itself — which
    maps every character to itself and makes the function a no-op on case.
    The table must pair lowercase with uppercase letters.
    """
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]


# Backward-compatible alias for the original (machine-generated) name.
__SCREAMING_SNAKE_CASE = capitalize

if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 311 | 1 |
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
# NOTE(review): in the original every module-level constant was bound to the
# same name `_lowerCAmelCase` (each assignment overwriting the previous one),
# while the checker function below references PATH_TO_TRANSFORMERS,
# CONFIG_MAPPING, _re_checkpoint and
# CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK. Names restored from
# those references.
PATH_TO_TRANSFORMERS = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile("\[(.+?)\]\((https://huggingface\.co/.+?)\)")


# Config classes that legitimately carry no checkpoint link in their docstring.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfigMixin",
    "DecisionTransformerConfigMixin",
    "EncoderDecoderConfigMixin",
    "RagConfigMixin",
    "SpeechEncoderDecoderConfigMixin",
    "VisionEncoderDecoderConfigMixin",
    "VisionTextDualEncoderConfigMixin",
}
def check_config_docstrings_have_checkpoints() -> None:
    """Fail if any registered config class's docstring lacks a valid checkpoint link.

    A link is valid when the markdown name and the huggingface.co URL agree,
    e.g. `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`.

    NOTE(review): the original was named ``lowercase`` while the ``__main__``
    guard calls ``check_config_docstrings_have_checkpoints``, and its locals
    were all bound to ``UpperCAmelCase_`` while later lines read
    ``checkpoints``/``ckpt_name``/etc.; names restored from those references.

    Raises:
        ValueError: listing every offending configuration class.
    """
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
# Module-level logger for the benchmark-arguments module (presumably named
# `logger` originally — TODO confirm; nothing in the visible code reads it).
_lowerCAmelCase = logging.get_logger(__name__)
def lowercase ( _a=None ,_a=None ) -> List[Any]:
return field(default_factory=lambda: default ,metadata=_a )
@dataclass
class UpperCAmelCase__ :
snake_case_ = list_field(
default=[] , metadata={
'''help''': (
'''Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version'''
''' of all available models'''
)
} , )
snake_case_ = list_field(
default=[8] , metadata={'''help''': '''List of batch sizes for which memory and time performance will be evaluated'''} )
snake_case_ = list_field(
default=[8, 32, 128, 512] , metadata={'''help''': '''List of sequence lengths for which memory and time performance will be evaluated'''} , )
snake_case_ = field(
default=snake_case__ , metadata={'''help''': '''Whether to benchmark inference of model. Inference can be disabled via --no-inference.'''} , )
snake_case_ = field(
default=snake_case__ , metadata={'''help''': '''Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'''} , )
snake_case_ = field(
default=snake_case__ , metadata={'''help''': '''Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'''} )
snake_case_ = field(default=snake_case__ , metadata={'''help''': '''Use FP16 to accelerate inference.'''} )
snake_case_ = field(default=snake_case__ , metadata={'''help''': '''Benchmark training of model'''} )
snake_case_ = field(default=snake_case__ , metadata={'''help''': '''Verbose memory tracing'''} )
snake_case_ = field(
default=snake_case__ , metadata={'''help''': '''Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'''} , )
snake_case_ = field(
default=snake_case__ , metadata={
'''help''': '''Whether to perform memory measurements. Memory measurements can be disabled via --no-memory'''
} , )
snake_case_ = field(default=snake_case__ , metadata={'''help''': '''Trace memory line by line'''} )
snake_case_ = field(default=snake_case__ , metadata={'''help''': '''Save result to a CSV file'''} )
snake_case_ = field(default=snake_case__ , metadata={'''help''': '''Save all print statements in a log file'''} )
snake_case_ = field(default=snake_case__ , metadata={'''help''': '''Whether to print environment information'''} )
snake_case_ = field(
default=snake_case__ , metadata={
'''help''': (
'''Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use'''
''' multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled'''
''' for debugging / testing and on TPU.'''
)
} , )
snake_case_ = field(
default=F'inference_time_{round(time() )}.csv' , metadata={'''help''': '''CSV filename used if saving time results to csv.'''} , )
snake_case_ = field(
default=F'inference_memory_{round(time() )}.csv' , metadata={'''help''': '''CSV filename used if saving memory results to csv.'''} , )
snake_case_ = field(
default=F'train_time_{round(time() )}.csv' , metadata={'''help''': '''CSV filename used if saving time results to csv for training.'''} , )
snake_case_ = field(
default=F'train_memory_{round(time() )}.csv' , metadata={'''help''': '''CSV filename used if saving memory results to csv for training.'''} , )
snake_case_ = field(
default=F'env_info_{round(time() )}.csv' , metadata={'''help''': '''CSV filename used if saving environment information.'''} , )
snake_case_ = field(
default=F'log_{round(time() )}.csv' , metadata={'''help''': '''Log filename used if print statements are saved in log.'''} , )
snake_case_ = field(default=3 , metadata={'''help''': '''Times an experiment will be run.'''} )
snake_case_ = field(
default=snake_case__ , metadata={
'''help''': (
'''Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain'''
''' model weights.'''
)
} , )
def snake_case_ ( self ):
"""simple docstring"""
warnings.warn(
F"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
" are deprecated in general and it is advised to use external Benchmarking libraries "
" to benchmark Transformer models." , A__ , )
def snake_case_ ( self ):
"""simple docstring"""
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def snake_case_ ( self ):
"""simple docstring"""
if len(self.models ) <= 0:
raise ValueError(
"Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
" bert-base-cased` or `args.models = ['bert-base-cased']." )
return self.models
@property
def snake_case_ ( self ):
"""simple docstring"""
if not self.multi_process:
return False
elif self.is_tpu:
logger.info("Multiprocessing is currently not possible on TPU." )
return False
else:
return True | 137 | 1 |
'''simple docstring'''
def UpperCamelCase ( __lowercase : int ):
'''simple docstring'''
A_ : List[str] = n ** (1 / 3)
return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
| 718 | import random
def UpperCamelCase ( __lowercase : int ):
'''simple docstring'''
A_ : Tuple = num - 1
A_ : Optional[Any] = 0
while s % 2 == 0:
A_ : Optional[int] = s // 2
t += 1
for _ in range(5 ):
A_ : Optional[int] = random.randrange(2 ,num - 1 )
A_ : Any = pow(__lowercase ,__lowercase ,__lowercase )
if v != 1:
A_ : List[str] = 0
while v != (num - 1):
if i == t - 1:
return False
else:
A_ : Union[str, Any] = i + 1
A_ : Tuple = (v**2) % num
return True
def UpperCamelCase ( __lowercase : int ):
'''simple docstring'''
if num < 2:
return False
A_ : Optional[Any] = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
1_01,
1_03,
1_07,
1_09,
1_13,
1_27,
1_31,
1_37,
1_39,
1_49,
1_51,
1_57,
1_63,
1_67,
1_73,
1_79,
1_81,
1_91,
1_93,
1_97,
1_99,
2_11,
2_23,
2_27,
2_29,
2_33,
2_39,
2_41,
2_51,
2_57,
2_63,
2_69,
2_71,
2_77,
2_81,
2_83,
2_93,
3_07,
3_11,
3_13,
3_17,
3_31,
3_37,
3_47,
3_49,
3_53,
3_59,
3_67,
3_73,
3_79,
3_83,
3_89,
3_97,
4_01,
4_09,
4_19,
4_21,
4_31,
4_33,
4_39,
4_43,
4_49,
4_57,
4_61,
4_63,
4_67,
4_79,
4_87,
4_91,
4_99,
5_03,
5_09,
5_21,
5_23,
5_41,
5_47,
5_57,
5_63,
5_69,
5_71,
5_77,
5_87,
5_93,
5_99,
6_01,
6_07,
6_13,
6_17,
6_19,
6_31,
6_41,
6_43,
6_47,
6_53,
6_59,
6_61,
6_73,
6_77,
6_83,
6_91,
7_01,
7_09,
7_19,
7_27,
7_33,
7_39,
7_43,
7_51,
7_57,
7_61,
7_69,
7_73,
7_87,
7_97,
8_09,
8_11,
8_21,
8_23,
8_27,
8_29,
8_39,
8_53,
8_57,
8_59,
8_63,
8_77,
8_81,
8_83,
8_87,
9_07,
9_11,
9_19,
9_29,
9_37,
9_41,
9_47,
9_53,
9_67,
9_71,
9_77,
9_83,
9_91,
9_97,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(__lowercase )
def UpperCamelCase ( __lowercase : int = 10_24 ):
'''simple docstring'''
while True:
A_ : Union[str, Any] = random.randrange(2 ** (keysize - 1) ,2 ** (keysize) )
if is_prime_low_num(__lowercase ):
return num
if __name__ == "__main__":
_UpperCAmelCase = generate_large_prime()
print(("""Prime number:""", num))
print(("""is_prime_low_num:""", is_prime_low_num(num)))
| 70 | 0 |
'''simple docstring'''
def a ( _UpperCAmelCase ) -> Union[str, Any]:
"""simple docstring"""
a_ = ''
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
def a ( _UpperCAmelCase ) -> Dict:
"""simple docstring"""
a_ = [chr(i + 6_5 ) for i in range(2_6 )]
# Remove duplicate characters from key
a_ = remove_duplicates(key.upper() )
a_ = len(__lowerCamelCase )
# First fill cipher with key characters
a_ = {alphabet[i]: char for i, char in enumerate(__lowerCamelCase )}
# Then map remaining characters in alphabet to
# the alphabet from the beginning
for i in range(len(__lowerCamelCase ) , 2_6 ):
a_ = alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
a_ = alphabet[i - offset]
a_ = char
return cipher_alphabet
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
"""simple docstring"""
return "".join(cipher_map.get(__lowerCamelCase , __lowerCamelCase ) for ch in message.upper() )
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> Dict:
"""simple docstring"""
a_ = {v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(__lowerCamelCase , __lowerCamelCase ) for ch in message.upper() )
def a ( ) -> Dict:
"""simple docstring"""
a_ = input('Enter message to encode or decode: ' ).strip()
a_ = input('Enter keyword: ' ).strip()
a_ = input('Encipher or decipher? E/D:' ).strip()[0].lower()
try:
a_ = {'e': encipher, 'd': decipher}[option]
except KeyError:
raise KeyError('invalid input option' )
a_ = create_cipher_map(__lowerCamelCase )
print(func(__lowerCamelCase , __lowerCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 697 |
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowercase =logging.get_logger(__name__)
class __magic_name__ ( lowerCAmelCase ):
UpperCAmelCase =["audio_values", "audio_mask"]
def __init__( self , snake_case=2_0_4_8 , snake_case=1 , snake_case=[1_6, 1_6] , snake_case=1_2_8 , snake_case=4_4_1_0_0 , snake_case=8_6 , snake_case=2_0_4_8 , snake_case=0.0 , **snake_case , ) -> Optional[int]:
'''simple docstring'''
super().__init__(
feature_size=snake_case , sampling_rate=snake_case , padding_value=snake_case , **snake_case , )
_UpperCAmelCase : Dict =spectrogram_length
_UpperCAmelCase : Any =num_channels
_UpperCAmelCase : Optional[int] =patch_size
_UpperCAmelCase : List[str] =feature_size // self.patch_size[1]
_UpperCAmelCase : Optional[int] =n_fft
_UpperCAmelCase : int =sampling_rate // hop_length_to_sampling_rate
_UpperCAmelCase : int =sampling_rate
_UpperCAmelCase : List[Any] =padding_value
_UpperCAmelCase : Optional[int] =mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=snake_case , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=snake_case , norm='slaney' , mel_scale='slaney' , ).T
def lowerCAmelCase ( self , snake_case) -> np.ndarray:
'''simple docstring'''
_UpperCAmelCase : Any =spectrogram(
snake_case , window_function(self.n_fft , 'hann') , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=80.0 , )
_UpperCAmelCase : Any =log_spec[:, :-1]
_UpperCAmelCase : List[str] =log_spec - 20.0
_UpperCAmelCase : Union[str, Any] =np.clip(log_spec / 40.0 , -2.0 , 0.0) + 1.0
return log_spec
def __call__( self , snake_case , snake_case = None , snake_case = True , snake_case = None , snake_case = False , snake_case = False , **snake_case , ) -> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'This feature extractor is set to support sampling rate'
f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
f" with {self.sampling_rate} and not {sampling_rate}.")
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.')
_UpperCAmelCase : Tuple =isinstance(snake_case , np.ndarray) and len(raw_speech.shape) > 1
if is_batched_numpy and len(raw_speech.shape) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}")
_UpperCAmelCase : str =is_batched_numpy or (
isinstance(snake_case , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list)))
)
if is_batched:
_UpperCAmelCase : List[str] =[np.asarray([speech] , dtype=np.floataa).T for speech in raw_speech]
elif not is_batched and not isinstance(snake_case , np.ndarray):
_UpperCAmelCase : List[Any] =np.asarray(snake_case , dtype=np.floataa)
elif isinstance(snake_case , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa):
_UpperCAmelCase : List[Any] =raw_speech.astype(np.floataa)
# always return batch
if not is_batched:
_UpperCAmelCase : Dict =[np.asarray([raw_speech]).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
_UpperCAmelCase : List[Any] =[
self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , snake_case):
_UpperCAmelCase : Dict =[np.asarray(snake_case , dtype=np.floataa) for feature in audio_features]
# Create audio attention mask
_UpperCAmelCase : Tuple =max(
[ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]) # The maximum number of audio patches in a batch
if return_attention_mask:
_UpperCAmelCase : Any =[
(ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
for feature in audio_features
]
_UpperCAmelCase : int =np.array(snake_case).astype(np.floataa)
# convert into correct format for padding
_UpperCAmelCase : List[Any] =max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
_UpperCAmelCase : List[str] =np.ones([len(snake_case), 1, max_time_len, self.feature_size]).astype(np.floataa)
_UpperCAmelCase : Any =padded_audio_features * self.padding_value
for i in range(len(snake_case)):
_UpperCAmelCase : List[str] =audio_features[i]
_UpperCAmelCase : Dict =feature
# return as BatchFeature
if return_attention_mask:
_UpperCAmelCase : List[str] ={'audio_values': padded_audio_features, 'audio_mask': audio_mask}
else:
_UpperCAmelCase : Optional[int] ={'audio_values': padded_audio_features}
_UpperCAmelCase : Any =BatchFeature(data=snake_case , tensor_type=snake_case)
return encoded_inputs
| 446 | 0 |
"""simple docstring"""
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class snake_case__ ( lowerCAmelCase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = MobileBertTokenizer
SCREAMING_SNAKE_CASE__ = MobileBertTokenizerFast
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = filter_non_english
SCREAMING_SNAKE_CASE__ = '''google/mobilebert-uncased'''
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
super().setUp()
UpperCAmelCase : List[str] = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
UpperCAmelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
UpperCAmelCase : List[Any] = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def __lowerCAmelCase ( self : Union[str, Any] , lowercase : Any ):
'''simple docstring'''
UpperCAmelCase : Dict = "UNwant\u00E9d,running"
UpperCAmelCase : Dict = "unwanted, running"
return input_text, output_text
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = self.tokenizer_class(self.vocab_file )
UpperCAmelCase : Dict = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(lowercase , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , [9, 6, 7, 12, 10, 11] )
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
UpperCAmelCase : Dict = self.get_tokenizer()
UpperCAmelCase : Union[str, Any] = self.get_rust_tokenizer()
UpperCAmelCase : Dict = "UNwant\u00E9d,running"
UpperCAmelCase : int = tokenizer.tokenize(lowercase )
UpperCAmelCase : List[Any] = rust_tokenizer.tokenize(lowercase )
self.assertListEqual(lowercase , lowercase )
UpperCAmelCase : List[str] = tokenizer.encode(lowercase , add_special_tokens=lowercase )
UpperCAmelCase : Optional[Any] = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
UpperCAmelCase : Optional[Any] = self.get_rust_tokenizer()
UpperCAmelCase : Union[str, Any] = tokenizer.encode(lowercase )
UpperCAmelCase : Tuple = rust_tokenizer.encode(lowercase )
self.assertListEqual(lowercase , lowercase )
# With lower casing
UpperCAmelCase : Dict = self.get_tokenizer(do_lower_case=lowercase )
UpperCAmelCase : Dict = self.get_rust_tokenizer(do_lower_case=lowercase )
UpperCAmelCase : int = "UNwant\u00E9d,running"
UpperCAmelCase : Any = tokenizer.tokenize(lowercase )
UpperCAmelCase : List[str] = rust_tokenizer.tokenize(lowercase )
self.assertListEqual(lowercase , lowercase )
UpperCAmelCase : str = tokenizer.encode(lowercase , add_special_tokens=lowercase )
UpperCAmelCase : Optional[int] = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
UpperCAmelCase : Tuple = self.get_rust_tokenizer()
UpperCAmelCase : str = tokenizer.encode(lowercase )
UpperCAmelCase : Optional[Any] = rust_tokenizer.encode(lowercase )
self.assertListEqual(lowercase , lowercase )
def __lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase : List[str] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase : int = BasicTokenizer(do_lower_case=lowercase )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase : Tuple = BasicTokenizer(do_lower_case=lowercase , strip_accents=lowercase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def __lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase : int = BasicTokenizer(do_lower_case=lowercase , strip_accents=lowercase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase : Any = BasicTokenizer(do_lower_case=lowercase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase : List[Any] = BasicTokenizer(do_lower_case=lowercase )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = BasicTokenizer(do_lower_case=lowercase , strip_accents=lowercase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def __lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = BasicTokenizer(do_lower_case=lowercase , strip_accents=lowercase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def __lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase : List[Any] = BasicTokenizer(do_lower_case=lowercase , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase : List[Any] = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
UpperCAmelCase : Any = {}
for i, token in enumerate(lowercase ):
UpperCAmelCase : int = i
UpperCAmelCase : int = WordpieceTokenizer(vocab=lowercase , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def __lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = self.get_tokenizer()
UpperCAmelCase : Dict = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(lowercase ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
self.assertListEqual(
[rust_tokenizer.tokenize(lowercase ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
@slow
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase : int = self.tokenizer_class.from_pretrained("google/mobilebert-uncased" )
UpperCAmelCase : Optional[int] = tokenizer.encode("sequence builders" , add_special_tokens=lowercase )
UpperCAmelCase : str = tokenizer.encode("multi-sequence build" , add_special_tokens=lowercase )
UpperCAmelCase : List[Any] = tokenizer.build_inputs_with_special_tokens(lowercase )
UpperCAmelCase : Any = tokenizer.build_inputs_with_special_tokens(lowercase , lowercase )
assert encoded_sentence == [1_01] + text + [1_02]
assert encoded_pair == [1_01] + text + [1_02] + text_a + [1_02]
def __lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase )
UpperCAmelCase : Optional[int] = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
UpperCAmelCase : Dict = tokenizer_r.encode_plus(
lowercase , return_attention_mask=lowercase , return_token_type_ids=lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase , )
UpperCAmelCase : Dict = tokenizer_r.do_lower_case if hasattr(lowercase , "do_lower_case" ) else False
UpperCAmelCase : int = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase : Dict = ["的", "人", "有"]
UpperCAmelCase : List[str] = "".join(lowercase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase : Tuple = True
UpperCAmelCase : List[Any] = self.tokenizer_class.from_pretrained(lowercase , **lowercase )
UpperCAmelCase : Tuple = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase )
UpperCAmelCase : int = tokenizer_p.encode(lowercase , add_special_tokens=lowercase )
UpperCAmelCase : List[Any] = tokenizer_r.encode(lowercase , add_special_tokens=lowercase )
UpperCAmelCase : Dict = tokenizer_r.convert_ids_to_tokens(lowercase )
UpperCAmelCase : int = tokenizer_p.convert_ids_to_tokens(lowercase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(lowercase , lowercase )
self.assertListEqual(lowercase , lowercase )
UpperCAmelCase : List[Any] = False
UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase )
UpperCAmelCase : Union[str, Any] = self.tokenizer_class.from_pretrained(lowercase , **lowercase )
UpperCAmelCase : Tuple = tokenizer_r.encode(lowercase , add_special_tokens=lowercase )
UpperCAmelCase : List[str] = tokenizer_p.encode(lowercase , add_special_tokens=lowercase )
UpperCAmelCase : Tuple = tokenizer_r.convert_ids_to_tokens(lowercase )
UpperCAmelCase : List[str] = tokenizer_p.convert_ids_to_tokens(lowercase )
# it is expected that only the first Chinese character is not preceded by "##".
UpperCAmelCase : Tuple = [
f"""##{token}""" if idx != 0 else token for idx, token in enumerate(lowercase )
]
self.assertListEqual(lowercase , lowercase )
self.assertListEqual(lowercase , lowercase )
| 292 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
snake_case_ : Dict = {
"""configuration_mvp""": ["""MVP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MvpConfig""", """MvpOnnxConfig"""],
"""tokenization_mvp""": ["""MvpTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : List[Any] = ["""MvpTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Any = [
"""MVP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MvpForCausalLM""",
"""MvpForConditionalGeneration""",
"""MvpForQuestionAnswering""",
"""MvpForSequenceClassification""",
"""MvpModel""",
"""MvpPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
snake_case_ : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 292 | 1 |
'''simple docstring'''
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : int ) -> Union[str, Any]:
'''simple docstring'''
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(snake_case_ , int(b / 2 ) ) * actual_power(snake_case_ , int(b / 2 ) )
else:
return a * actual_power(snake_case_ , int(b / 2 ) ) * actual_power(snake_case_ , int(b / 2 ) )
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : int ) -> float:
'''simple docstring'''
if b < 0:
return 1 / actual_power(snake_case_ , snake_case_ )
return actual_power(snake_case_ , snake_case_ )
if __name__ == "__main__":
print(power(-2, -3))
| 78 | '''simple docstring'''
import requests
def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : str ) -> None:
'''simple docstring'''
UpperCAmelCase_ = {"Content-Type": "application/json"}
UpperCAmelCase_ = requests.post(snake_case_ , json={"text": message_body} , headers=snake_case_ )
if response.status_code != 2_00:
UpperCAmelCase_ = (
"Request to slack returned an error "
f"""{response.status_code}, the response is:\n{response.text}"""
)
raise ValueError(snake_case_ )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
| 78 | 1 |
def A(_lowerCamelCase):
    """Sort a mutable sequence in place using binary insertion sort.

    A binary search finds each element's insertion point inside the already
    sorted prefix, then the intervening elements are shifted one slot right.
    Equal elements are inserted after existing ones, so the sort is stable.

    Args:
        _lowerCamelCase: mutable sequence of mutually comparable items.

    Returns:
        The same sequence object, now sorted in ascending order.
    """
    length = len(_lowerCamelCase)
    for i in range(1, length):
        val = _lowerCamelCase[i]
        low = 0
        high = i - 1
        # Binary search for the insertion index of `val` within [0, i).
        while low <= high:
            mid = (low + high) // 2
            if val < _lowerCamelCase[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # Shift the block [low, i) one position right to open slot `low`.
        for j in range(i, low, -1):
            _lowerCamelCase[j] = _lowerCamelCase[j - 1]
        _lowerCamelCase[low] = val
    return _lowerCamelCase
if __name__ == "__main__":
    # Read a comma-separated list of integers and print it sorted.
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(A(unsorted))
| 658 |
from __future__ import annotations
def A(_lowerCamelCase):
    """Return the arithmetic mean of a non-empty sequence of numbers.

    Args:
        _lowerCamelCase: sequence of numbers.

    Raises:
        ValueError: if the sequence is empty.
    """
    if not _lowerCamelCase:
        raise ValueError("List is empty")
    return sum(_lowerCamelCase) / len(_lowerCamelCase)
if __name__ == "__main__":
    # Run this module's doctests when invoked as a script.
    from doctest import testmod

    testmod()
| 658 | 1 |
'''simple docstring'''
from __future__ import annotations
def a(possible_board, diagonal_right_collisions, diagonal_left_collisions, boards, n) -> None:
    """Depth-first search that places one queen per row of an n x n board.

    `possible_board[row] == col` records where each already-placed queen
    sits.  Completed boards are appended to `boards` as lists of strings
    such as '. Q . . '.

    Args:
        possible_board: columns of the queens placed so far, one per row.
        diagonal_right_collisions: occupied row - col diagonals (45 degrees).
        diagonal_left_collisions: occupied row + col diagonals (135 degrees).
        boards: output accumulator for rendered solutions.
        n: board size.
    """

    # The recursion lives in a closure so the search is self-contained and
    # does not depend on this function's (re-bindable) module-level name.
    def _search(board, right_diags, left_diags):
        row = len(board)
        if row == n:
            # Every row holds a queen: render e.g. [1, 3, 0, 2] as
            # ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . '].
            boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in board])
            return
        for col in range(n):
            # A candidate square collides if its column or either of its
            # diagonals (row - col, row + col) is already occupied.
            if (
                col in board
                or row - col in right_diags
                or row + col in left_diags
            ):
                continue
            _search([*board, col], [*right_diags, row - col], [*left_diags, row + col])

    _search(possible_board, diagonal_right_collisions, diagonal_left_collisions)
def a(_UpperCAmelCase) -> None:
    """Solve the n-queens puzzle, printing every solution and a final count.

    Args:
        _UpperCAmelCase: board size n (number of rows, columns and queens).
    """
    n = _UpperCAmelCase

    # Self-contained depth-first search: one queen per row; `board[row]`
    # holds the column of that row's queen.
    def _dfs(board, right_diags, left_diags, boards):
        row = len(board)
        if row == n:
            # Render each solved board as n strings like '. Q . . '.
            boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in board])
            return
        for col in range(n):
            # Skip squares attacked by an earlier queen (column or diagonal).
            if col in board or row - col in right_diags or row + col in left_diags:
                continue
            _dfs([*board, col], [*right_diags, row - col], [*left_diags, row + col], boards)

    boards = []
    _dfs([], [], [], boards)
    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")
    print(len(boards), "solutions were found.")
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Print every solution for a 4x4 board (there are two).
    a(4)
| 697 |
'''simple docstring'''
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """Return the Jaccard similarity |A ∩ B| / |A ∪ B| of two collections.

    Supports ``set`` pairs and ``list``/``tuple`` pairs.  With
    ``alternative_union=True`` the denominator is ``len(A) + len(B)``
    instead of the true union size.  Returns ``None`` for unsupported
    input types.

    (Name restored from the call at the bottom of this file; the mangled
    def repeated one identifier for all three parameters — a SyntaxError —
    while the body already referenced ``set_a``/``set_b``/``alternative_union``.
    An unreachable duplicate ``return`` after the list branch was removed.)
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union

        # Order-preserving union: all of set_a, then unseen elements of set_b.
        union = set_a + [element for element in set_b if element not in set_a]
        return len(intersection) / len(union)

    # Unsupported input types.
    return None
if __name__ == "__main__":
    # NOTE(review): mangling bound both example sets to the same name and
    # the function above is named `a`, so `jaccard_similarity`, `set_a` and
    # `set_b` are undefined here — confirm against the unmangled original.
    __lowerCAmelCase ={"a", "b", "c", "d", "e"}
    __lowerCAmelCase ={"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
| 697 | 1 |
import torch
from diffusers import StableDiffusionPipeline
# Render one image with a fine-tuned Stable Diffusion checkpoint.
# Variable names restored from the mangled source (everything was bound to
# `_A` while the later lines reference `model_id`, `pipe`, `prompt`, `image`);
# `torch.float16` restored from the digit-mangled `torch.floataa`.
model_id = 'path-to-your-trained-model'
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to('cuda')

prompt = 'A photo of sks dog in a bucket'
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save('dog-bucket.png')
| 708 |
import torch
from transformers import AutoModel
class A ( torch.nn.Module ):
    """FSNER head: a BERT encoder plus start/end span scoring against support
    examples.

    Restored from name-mangled source: method and attribute names are
    recovered from their own call sites (``self.BERT`` in ``forward``,
    ``self.bert``/``self.cos``/``self.softmax`` in the helpers) — confirm
    against the unmangled original.
    """

    def __init__( self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased" ):
        # BUG FIX: the original called super(<checkpoint string>, self).__init__(),
        # which raises TypeError because the first argument was not a class.
        super().__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1E-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT( self, **inputs ):
        """Encode tokenized inputs and return the last hidden state."""
        return self.bert(**inputs).last_hidden_state

    def VectorSum( self, token_embeddings ):
        """Sum token embeddings over dim 2, keeping the axis."""
        return token_embeddings.sum(2, keepdim=True)

    def Atten( self, q, S, T=1 ):
        """Temperature-scaled softmax over cosine similarities of q vs S."""
        return self.softmax(T * self.cos(q, S))

    def forward( self, W_query, W_supports ):
        """Score start/end token positions of the query against each support.

        Returns ``(p_starts, p_ends)``: per-support probability rows stacked
        with ``torch.vstack``.
        """
        support_sizes = W_supports['sizes'].tolist()
        start_token_id = W_supports['start_token_id'].item()
        end_token_id = W_supports['end_token_id'].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports['input_ids'] == start_token_id
        end_token_masks = W_supports['input_ids'] == end_token_id

        for i, size in enumerate(support_sizes):
            # assumes `sizes` holds per-query slice offsets, so the slice start
            # is the previous entry (0 for the first) — TODO confirm vs caller.
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
| 325 | 0 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
__snake_case =re.compile(R"""\s+""")
def get_hash(example):
    """Return the MD5 hex digest of the example's content, whitespace removed.

    Name restored from its call site in ``preprocess`` below.  ``hashlib.md5``
    restored from the digit-mangled ``hashlib.mda``; the inline pattern is
    identical to the module-level ``re.compile(r"\\s+")`` constant.
    """
    content_no_ws = re.sub(r"\s+", "", example["content"])
    return {"hash": hashlib.md5(content_no_ws.encode("utf-8")).hexdigest()}
def line_stats(example):
    """Return the mean and max line length of the example's content.

    Name restored from its call site in ``preprocess``; the mangled body
    measured an undefined name instead of each line.
    """
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}
def alpha_stats(example):
    """Return the fraction of alphanumeric characters in the content.

    Name restored from its call site in ``preprocess``; the mangled body
    read ``example`` while the parameter was renamed away.
    """
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}
def check_uniques(example, uniques):
    """Consume the example's hash from ``uniques``.

    Returns True only the first time a hash is seen (and removes it), so a
    second example with the same hash is reported as a duplicate.
    Signature restored from the call in ``filter`` below — the mangled def
    repeated one parameter name, which is a SyntaxError.
    """
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False
def is_autogenerated(example, scan_width=5):
    """Flag files whose first ``scan_width`` lines mention auto-generation.

    Names restored from the call site in ``preprocess``; the mangled def
    repeated one parameter name (SyntaxError).
    """
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    return {"autogenerated": False}
def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Heuristically flag configuration or test files.

    First scans the opening ``scan_width`` lines for tell-tale phrases; then
    counts occurrences of "config"/"test" — more than ``coeff * num_lines``
    of either marks the file.  Parameter names restored from the mangled
    source, which repeated one identifier (SyntaxError).
    """
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
    if count_config > threshold or count_test > threshold:
        return {"config_or_test": True}
    return {"config_or_test": False}
def has_no_keywords(example):
    """True when no line contains Python structure keywords (def/class/for/while).

    Name restored from the call site in ``preprocess``; the mangled body read
    ``example`` while the parameter was renamed away.
    """
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}
def has_few_assignments(example, minimum=4):
    """True when the content contains at most ``minimum`` '=' characters.

    Parameter names restored from the mangled source, which repeated one
    identifier (SyntaxError).
    """
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
    if counter > minimum:
        return {"has_few_assignments": False}
    return {"has_few_assignments": True}
def char_token_ratio(example):
    """Ratio of character count to token count for the example's content.

    Name restored from the call site in ``preprocess``.
    NOTE(review): relies on a module-global ``tokenizer`` (created in the
    script body below); ``truncation=True`` restored from the mangled flag —
    confirm against the unmangled original.
    """
    input_ids = tokenizer(example["content"], truncation=True)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}
def preprocess(example):
    """Run every per-example statistic/heuristic and merge the result dicts.

    Name restored from ``ds.map(preprocess, ...)`` in the script body below;
    the mangled body passed an undefined name to each helper instead of the
    parameter.
    """
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results
def filter(example, uniques, args):
    """Filtering criteria: keep only first-seen, human-written, code-like files.

    Signature restored from ``ds.filter(filter, fn_kwargs={"uniques": ...,
    "args": ...})`` in the script body; the mangled def repeated one
    parameter name (SyntaxError).  NOTE: intentionally shadows the builtin
    ``filter`` to match the call site.
    """
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        # Probabilistically keep some config/test files.
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True
def compress_file(file_path):
    """Gzip-compress ``file_path`` to ``<file_path>.gz`` and delete the original.

    Name restored from the call in the save loop below; the mangled body
    opened and unlinked an undefined name instead of the parameter.
    """
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
# NOTE(review): name-mangling bound every module variable below to
# `__snake_case`, while later lines reference the original names
# (`parser`, `args`, `ds`, `t_start`, `uniques`, `frac`, `ds_filter`,
# `output_dir`, `data_dir`, `file_path`, `end_index`) — none of which are
# defined here.  Confirm against the unmangled original before running.
# Settings
__snake_case =HfArgumentParser(PreprocessingArguments)
__snake_case =parser.parse_args()
if args.num_workers is None:
    __snake_case =multiprocessing.cpu_count()
__snake_case =AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
__snake_case =time.time()
__snake_case =load_dataset(args.dataset_name, split="""train""")
print(F'''Time to load dataset: {time.time()-t_start:.2f}''')

# Run preprocessing
__snake_case =time.time()
__snake_case =ds.map(preprocess, num_proc=args.num_workers)
print(F'''Time to preprocess dataset: {time.time()-t_start:.2f}''')

# Deduplicate hashes
__snake_case =set(ds.unique("""hash"""))
__snake_case =len(uniques) / len(ds)
print(F'''Fraction of duplicates: {1-frac:.2%}''')

# Deduplicate data and apply heuristics
__snake_case =time.time()
__snake_case =ds.filter(filter, fn_kwargs={"""uniques""": uniques, """args""": args})
print(F'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(F'''Size of filtered dataset: {len(ds_filter)}''')

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    __snake_case =time.time()
    __snake_case , __snake_case =deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(F'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
    print(F'''Size of deduplicate dataset: {len(ds_filter)}''')

# Save data in batches of samples_per_file
__snake_case =Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / """duplicate_clusters.json""", """w""") as f:
        json.dump(duplicate_clusters, f)

__snake_case =output_dir / """data"""
data_dir.mkdir(exist_ok=True)

__snake_case =time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    __snake_case =str(data_dir / F'''file-{file_number+1:012}.json''')
    __snake_case =min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(F'''Time to save dataset: {time.time()-t_start:.2f}''')
| 133 |
'''simple docstring'''
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
    # NOTE(review): mangled custom exception class; its base `lowercase_` is
    # undefined in this file — presumably Exception. Used by the queues below.
    pass
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
    # NOTE(review): second mangled custom exception class (it shadows the one
    # above because both carry the same mangled name); base `lowercase_` is
    # undefined in this file — confirm against the unmangled original.
    pass
class SCREAMING_SNAKE_CASE__ :
def __init__( self: Optional[Any]) ->List[str]:
'''simple docstring'''
a_ = [
[],
[],
[],
]
def _lowerCAmelCase ( self: Dict , a: int , a: int) ->None:
'''simple docstring'''
try:
if len(self.queues[priority]) >= 1_00:
raise OverflowError("Maximum queue size is 100")
self.queues[priority].append(a)
except IndexError:
raise ValueError("Valid priorities are 0, 1, and 2")
def _lowerCAmelCase ( self: Union[str, Any]) ->int:
'''simple docstring'''
for queue in self.queues:
if queue:
return queue.pop(0)
raise UnderFlowError("All queues are empty")
def __str__( self: Dict) ->str:
'''simple docstring'''
return "\n".join(f"""Priority {i}: {q}""" for i, q in enumerate(self.queues))
class SCREAMING_SNAKE_CASE__ :
def __init__( self: Any) ->List[str]:
'''simple docstring'''
a_ = []
def _lowerCAmelCase ( self: int , a: int) ->None:
'''simple docstring'''
if len(self.queue) == 1_00:
raise OverFlowError("Maximum queue size is 100")
self.queue.append(a)
def _lowerCAmelCase ( self: List[str]) ->int:
'''simple docstring'''
if not self.queue:
raise UnderFlowError("The queue is empty")
else:
a_ = min(self.queue)
self.queue.remove(a)
return data
def __str__( self: Optional[int]) ->str:
'''simple docstring'''
return str(self.queue)
def __UpperCAmelCase () -> Union[str, Any]:
    '''simple docstring'''
    # NOTE(review): mangling broke this demo — the local is assigned to `a_`
    # but used as `fpq`, and `FixedPriorityQueue` / `lowercase__` are not
    # defined in this file; confirm against the unmangled original.
    a_ = FixedPriorityQueue()
    fpq.enqueue(0 ,10 )
    fpq.enqueue(1 ,70 )
    fpq.enqueue(0 ,100 )
    fpq.enqueue(2 ,1 )
    fpq.enqueue(2 ,5 )
    fpq.enqueue(1 ,7 )
    fpq.enqueue(2 ,4 )
    fpq.enqueue(1 ,64 )
    fpq.enqueue(0 ,128 )
    print(lowercase__ )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(lowercase__ )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
def __UpperCAmelCase () -> List[Any]:
    '''simple docstring'''
    # NOTE(review): mangling broke this demo — the local is assigned to `a_`
    # but used as `epq`, and `ElementPriorityQueue` / `lowercase__` are not
    # defined in this file; this def also shadows the demo above because both
    # carry the same mangled name. Confirm against the unmangled original.
    a_ = ElementPriorityQueue()
    epq.enqueue(10 )
    epq.enqueue(70 )
    epq.enqueue(100 )
    epq.enqueue(1 )
    epq.enqueue(5 )
    epq.enqueue(7 )
    epq.enqueue(4 )
    epq.enqueue(64 )
    epq.enqueue(128 )
    print(lowercase__ )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(lowercase__ )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
if __name__ == "__main__":
    # NOTE(review): both demo helpers above were mangled to `__UpperCAmelCase`,
    # so these names are undefined here — confirm the intended names.
    fixed_priority_queue()
    element_priority_queue()
| 685 | 0 |
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
# Pull in torch/XLA helpers only when a TPU runtime is available; `xm` and
# `met` are used below for TPU debug-metric logging.
if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met
# NOTE(review): a question-answering style Trainer subclass that post-processes
# predictions before computing metrics.  Name-mangling collapsed every local
# assignment onto `__SCREAMING_SNAKE_CASE` while the code still reads the
# original names (`eval_dataset`, `eval_dataloader`, `compute_metrics`,
# `eval_loop`, `start_time`, `output`, `total_batch_size`, `predictions`,
# `metrics`, ...), and the `__init__` signature repeats one parameter name
# (a SyntaxError) — confirm every name against the unmangled original.
class UpperCamelCase_ ( UpperCamelCase):
    """simple docstring"""

    def __init__( self : str , *UpperCAmelCase__ : str , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : str=None , **UpperCAmelCase__ : Union[str, Any] ) -> str:
        super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
        # Stores the raw eval examples and the post-processing callable.
        __SCREAMING_SNAKE_CASE = eval_examples
        __SCREAMING_SNAKE_CASE = post_process_function

    # Evaluation entry point: runs the prediction loop with metric computation
    # disabled, then post-processes and computes metrics on the main node.
    def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : str=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : str = "eval" ) -> str:
        __SCREAMING_SNAKE_CASE = self.eval_dataset if eval_dataset is None else eval_dataset
        __SCREAMING_SNAKE_CASE = self.get_eval_dataloader(UpperCAmelCase__ )
        __SCREAMING_SNAKE_CASE = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        __SCREAMING_SNAKE_CASE = self.compute_metrics
        __SCREAMING_SNAKE_CASE = None
        __SCREAMING_SNAKE_CASE = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        __SCREAMING_SNAKE_CASE = time.time()

        try:
            __SCREAMING_SNAKE_CASE = eval_loop(
                UpperCAmelCase__ , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCAmelCase__ , metric_key_prefix=UpperCAmelCase__ , )
        finally:
            # Always restore the metric callable, even if the loop raised.
            __SCREAMING_SNAKE_CASE = compute_metrics
        __SCREAMING_SNAKE_CASE = self.args.eval_batch_size * self.args.world_size
        if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                UpperCAmelCase__ , UpperCAmelCase__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            __SCREAMING_SNAKE_CASE = self.post_process_function(UpperCAmelCase__ , UpperCAmelCase__ , output.predictions )
            __SCREAMING_SNAKE_CASE = self.compute_metrics(UpperCAmelCase__ )

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(F"""{metric_key_prefix}_""" ):
                    __SCREAMING_SNAKE_CASE = metrics.pop(UpperCAmelCase__ )
            metrics.update(output.metrics )
        else:
            __SCREAMING_SNAKE_CASE = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(UpperCAmelCase__ )

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )

        __SCREAMING_SNAKE_CASE = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCAmelCase__ )
        return metrics

    # Prediction entry point: same structure as evaluate, but returns a
    # PredictionOutput and skips post-processing when no hooks are configured.
    def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : str = "test" ) -> Dict:
        __SCREAMING_SNAKE_CASE = self.get_test_dataloader(UpperCAmelCase__ )

        # Temporarily disable metric computation, we will do it in the loop here.
        __SCREAMING_SNAKE_CASE = self.compute_metrics
        __SCREAMING_SNAKE_CASE = None
        __SCREAMING_SNAKE_CASE = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        __SCREAMING_SNAKE_CASE = time.time()

        try:
            __SCREAMING_SNAKE_CASE = eval_loop(
                UpperCAmelCase__ , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCAmelCase__ , metric_key_prefix=UpperCAmelCase__ , )
        finally:
            __SCREAMING_SNAKE_CASE = compute_metrics
        __SCREAMING_SNAKE_CASE = self.args.eval_batch_size * self.args.world_size
        if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                UpperCAmelCase__ , UpperCAmelCase__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        __SCREAMING_SNAKE_CASE = self.post_process_function(UpperCAmelCase__ , UpperCAmelCase__ , output.predictions , "predict" )
        __SCREAMING_SNAKE_CASE = self.compute_metrics(UpperCAmelCase__ )

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(F"""{metric_key_prefix}_""" ):
                __SCREAMING_SNAKE_CASE = metrics.pop(UpperCAmelCase__ )
        metrics.update(output.metrics )
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCAmelCase__ )
| 700 |
"""simple docstring"""
from math import asin, atan, cos, radians, sin, sqrt, tan
# WGS84 ellipsoid parameters (metres).  Names restored: the mangled source
# bound all three constants to the same identifier while the function body
# reads AXIS_A / AXIS_B / RADIUS.
AXIS_A = 6_37_81_37.0
AXIS_B = 6_35_67_52.31_42_45
RADIUS = 6_3_7_8_1_3_7


def UpperCAmelCase__ (lat1, lon1, lat2, lon2):
    """Haversine distance in metres between two (lat, lon) points in degrees.

    Geodetic latitudes are first reduced for the Earth's flattening, then the
    spherical haversine formula is applied.  Parameter names restored — the
    mangled def repeated one identifier four times (SyntaxError) — and the
    half-angle term fixed: the mangled source computed ``phi - phi`` (always
    zero) instead of ``phi_2 - phi_1``.
    """
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)

    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda

    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 553 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazy import structure: submodule name -> public names it exports.  Optional
# backends (vision / torch / tf) are only registered when available.  The
# mangled source overwrote one variable for every assignment, so the
# `_import_structure` referenced by the final `_LazyModule(...)` call was
# never built; restored to the canonical transformers lazy-module pattern.
_import_structure = {
    'configuration_mobilevit': ['MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileViTConfig', 'MobileViTOnnxConfig'],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_mobilevit'] = ['MobileViTFeatureExtractor']
    _import_structure['image_processing_mobilevit'] = ['MobileViTImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_mobilevit'] = [
        'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MobileViTForImageClassification',
        'MobileViTForSemanticSegmentation',
        'MobileViTModel',
        'MobileViTPreTrainedModel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_mobilevit'] = [
        'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFMobileViTForImageClassification',
        'TFMobileViTForSemanticSegmentation',
        'TFMobileViTModel',
        'TFMobileViTPreTrainedModel',
    ]


if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilevit import MobileViTFeatureExtractor
        from .image_processing_mobilevit import MobileViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilevit import (
            MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileViTForImageClassification,
            MobileViTForSemanticSegmentation,
            MobileViTModel,
            MobileViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilevit import (
            TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileViTForImageClassification,
            TFMobileViTForSemanticSegmentation,
            TFMobileViTModel,
            TFMobileViTPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on
    # first attribute access (restored from the mangled plain assignment).
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 406 |
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def get_config(checkpoint_url):
    """Build a Swin2SR config whose fields match the checkpoint named in the URL.

    Name and parameter restored from their call/usage sites (the body reads
    ``checkpoint_url`` and the converter below calls ``get_config``).  The
    mangled source assigned every value to a throwaway local, discarding the
    configuration entirely; attribute names restored from the upstream
    conversion script — confirm against the unmangled original.
    """
    config = SwinaSRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = "pixelshuffle_aux"
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = "pixelshuffledirect"
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = "nearest+conv"
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 1_26
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ""

    return config
def rename_key(name, config):
    """Map an original Swin2SR state-dict key to its transformers equivalent.

    Signature restored (the mangled def repeated one parameter name — a
    SyntaxError — while the body reads ``name`` and ``config.upsampler``),
    and every ``name = name.replace(...)`` rebinding restored from the
    mangled throwaway assignments.
    """
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm")
    if "layers" in name:
        name = name.replace("layers", "encoder.stages")
    if "residual_group.blocks" in name:
        name = name.replace("residual_group.blocks", "layers")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "patch_embed.projection")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "conv_first" in name:
        name = name.replace("conv_first", "first_convolution")

    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("conv_last", "final_convolution")
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("conv_before_upsample.0", "conv_before_upsample")
            if "upsample.0" in name:
                name = name.replace("upsample.0", "upsample.convolution_0")
            if "upsample.2" in name:
                name = name.replace("upsample.2", "upsample.convolution_1")
            name = "upsample." + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("upsample.0.weight", "upsample.conv.weight")
            name = name.replace("upsample.0.bias", "upsample.conv.bias")
        else:
            pass
    else:
        # Everything that is not a head key lives under the `swin2sr.` prefix.
        name = "swin2sr." + name

    return name
# NOTE(review): this is the qkv-splitting state-dict converter.  Mangling
# broke it badly: the def repeats one parameter name (a SyntaxError), the
# body reads `orig_state_dict` / `key` / `val` / `config.embed_dim` that were
# assigned to throwaway locals, and every target assignment of the split
# query/key/value tensors into the new state dict was collapsed onto a
# single local, discarding the destination keys entirely.  The destination
# key strings cannot be recovered from this file — restore them from the
# unmangled original before use.
def __a ( lowerCAmelCase_ : Union[str, Any] ,lowerCAmelCase_ : Dict ) -> List[Any]:
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        # presumably: val = orig_state_dict.pop(key) — TODO confirm
        UpperCAmelCase_= orig_state_dict.pop(lowerCAmelCase_ )
        if "qkv" in key:
            UpperCAmelCase_= key.split(""".""" )
            UpperCAmelCase_= int(key_split[1] )
            UpperCAmelCase_= int(key_split[4] )
            UpperCAmelCase_= config.embed_dim
            if "weight" in key:
                # q / k / v weight slices of the fused qkv matrix
                UpperCAmelCase_= val[:dim, :]
                UpperCAmelCase_= val[dim : dim * 2, :]
                UpperCAmelCase_= val[-dim:, :]
            else:
                # q / k / v bias slices
                UpperCAmelCase_= val[:dim]
                UpperCAmelCase_= val[dim : dim * 2]
                UpperCAmelCase_= val[-dim:]
            pass
        else:
            UpperCAmelCase_= val

    return orig_state_dict
# NOTE(review): end-to-end converter: build config, load + convert the
# original checkpoint, verify a reference image's outputs, then save/push.
# Mangling collapsed every local assignment (`config`, `model`, `state_dict`,
# `new_state_dict`, `missing_keys`/`unexpected_keys`, `url`, `image`,
# `processor`, `image_size`, `transforms`, `pixel_values`, `outputs`,
# `expected_shape`, `expected_slice`, `url_to_name`, `model_name`) and the
# def repeats one parameter name (a SyntaxError) — confirm every name
# against the unmangled original before running.
def __a ( lowerCAmelCase_ : Dict ,lowerCAmelCase_ : List[Any] ,lowerCAmelCase_ : List[Any] ) -> int:
    '''simple docstring'''
    UpperCAmelCase_= get_config(lowerCAmelCase_ )
    UpperCAmelCase_= SwinaSRForImageSuperResolution(lowerCAmelCase_ )
    model.eval()

    UpperCAmelCase_= torch.hub.load_state_dict_from_url(lowerCAmelCase_ ,map_location="""cpu""" )
    UpperCAmelCase_= convert_state_dict(lowerCAmelCase_ ,lowerCAmelCase_ )
    UpperCAmelCase_, UpperCAmelCase_= model.load_state_dict(lowerCAmelCase_ ,strict=lowerCAmelCase_ )

    if len(lowerCAmelCase_ ) > 0:
        raise ValueError("""Missing keys when converting: {}""".format(lowerCAmelCase_ ) )
    for key in unexpected_keys:
        # Buffers that are recomputed at load time are allowed to be unexpected.
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(F"""Unexpected key {key} in state_dict""" )

    # verify values
    UpperCAmelCase_= """https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"""
    UpperCAmelCase_= Image.open(requests.get(lowerCAmelCase_ ,stream=lowerCAmelCase_ ).raw ).convert("""RGB""" )
    UpperCAmelCase_= SwinaSRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    UpperCAmelCase_= 1_26 if """Jpeg""" in checkpoint_url else 2_56
    UpperCAmelCase_= Compose(
        [
            Resize((image_size, image_size) ),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406] ,std=[0.229, 0.224, 0.225] ),
        ] )
    UpperCAmelCase_= transforms(lowerCAmelCase_ ).unsqueeze(0 )

    if config.num_channels == 1:
        UpperCAmelCase_= pixel_values[:, 0, :, :].unsqueeze(1 )

    UpperCAmelCase_= model(lowerCAmelCase_ )

    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        UpperCAmelCase_= torch.Size([1, 3, 5_12, 5_12] )
        UpperCAmelCase_= torch.tensor(
            [[-0.7_087, -0.7_138, -0.6_721], [-0.8_340, -0.8_095, -0.7_298], [-0.9_149, -0.8_414, -0.7_940]] )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        UpperCAmelCase_= torch.Size([1, 3, 10_24, 10_24] )
        UpperCAmelCase_= torch.tensor(
            [[-0.7_775, -0.8_105, -0.8_933], [-0.7_764, -0.8_356, -0.9_225], [-0.7_976, -0.8_686, -0.9_579]] )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        UpperCAmelCase_= torch.Size([1, 3, 10_24, 10_24] )
        UpperCAmelCase_= torch.tensor(
            [[-0.8_035, -0.7_504, -0.7_491], [-0.8_538, -0.8_124, -0.7_782], [-0.8_804, -0.8_651, -0.8_493]] )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        UpperCAmelCase_= torch.Size([1, 3, 5_12, 5_12] )
        UpperCAmelCase_= torch.tensor(
            [[-0.7_669, -0.8_662, -0.8_767], [-0.8_810, -0.9_962, -0.9_820], [-0.9_340, -1.0_322, -1.1_149]] )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        UpperCAmelCase_= torch.Size([1, 3, 10_24, 10_24] )
        UpperCAmelCase_= torch.tensor(
            [[-0.5_238, -0.5_557, -0.6_321], [-0.6_016, -0.5_903, -0.6_391], [-0.6_244, -0.6_334, -0.6_889]] )

    assert (
        outputs.reconstruction.shape == expected_shape
    ), F"""Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"""
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] ,lowerCAmelCase_ ,atol=1E-3 )
    print("""Looks ok!""" )

    UpperCAmelCase_= {
        """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""": (
            """swin2SR-classical-sr-x2-64"""
        ),
        """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth""": (
            """swin2SR-classical-sr-x4-64"""
        ),
        """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth""": (
            """swin2SR-compressed-sr-x4-48"""
        ),
        """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth""": (
            """swin2SR-lightweight-x2-64"""
        ),
        """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth""": (
            """swin2SR-realworld-sr-x4-64-bsrgan-psnr"""
        ),
    }
    UpperCAmelCase_= url_to_name[checkpoint_url]

    if pytorch_dump_folder_path is not None:
        print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(lowerCAmelCase_ )
        print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
        processor.save_pretrained(lowerCAmelCase_ )

    if push_to_hub:
        model.push_to_hub(F"""caidas/{model_name}""" )
        processor.push_to_hub(F"""caidas/{model_name}""" )
if __name__ == "__main__":
    # CLI entry point: URL of the original checkpoint, optional dump folder,
    # optional push to the Hub.  NOTE(review): the converter above is mangled
    # to `__a`, so `convert_swinasr_checkpoint` is undefined here as-is.
    __A = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--checkpoint_url''',
        default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''',
        type=str,
        help='''URL of the original Swin2SR checkpoint you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''')
    __A = parser.parse_args()
    convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 593 | 0 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
# Used below as a skip set when filtering the model mappings (referenced there
# under the mangled name `_TO_SKIP`).
A : List[Any] = {"""LayoutLMv2Config""", """LayoutLMv3Config"""}
@is_pipeline_test
class lowerCAmelCase_ ( unittest.TestCase ):
    # NOTE(review): both class attributes below are bound to the same mangled
    # name ``__UpperCAmelCase`` and the following ``if`` statements test
    # ``model_mapping`` / ``tf_model_mapping``, which are never defined in
    # this scope -- this looks like a mechanical-renaming artifact; confirm
    # against the upstream pipeline test before relying on these mappings.
    __UpperCAmelCase = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    __UpperCAmelCase = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    if model_mapping is not None:
        __UpperCAmelCase = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        __UpperCAmelCase = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    @require_torch
    def __snake_case ( self : int ):
        '''Tiny PyTorch checkpoint: single input, ``top_k`` variants and the
        legacy ``return_all_scores`` flag of the text-classification pipeline.'''
        snake_case : Any =pipeline(
            task='''text-classification''', model='''hf-internal-testing/tiny-random-distilbert''', framework='''pt''' )
        snake_case : Tuple =text_classifier('''This is great !''' )
        self.assertEqual(nested_simplify(_snake_case ), [{'''label''': '''LABEL_0''', '''score''': 0.504}] )
        snake_case : List[Any] =text_classifier('''This is great !''', top_k=2 )
        self.assertEqual(
            nested_simplify(_snake_case ), [{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}] )
        snake_case : Any =text_classifier(['''This is great !''', '''This is bad'''], top_k=2 )
        self.assertEqual(
            nested_simplify(_snake_case ), [
                [{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}],
                [{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}],
            ], )
        snake_case : str =text_classifier('''This is great !''', top_k=1 )
        self.assertEqual(nested_simplify(_snake_case ), [{'''label''': '''LABEL_0''', '''score''': 0.504}] )
        # Legacy behavior
        snake_case : Tuple =text_classifier('''This is great !''', return_all_scores=_snake_case )
        self.assertEqual(nested_simplify(_snake_case ), [{'''label''': '''LABEL_0''', '''score''': 0.504}] )
        snake_case : List[Any] =text_classifier('''This is great !''', return_all_scores=_snake_case )
        self.assertEqual(
            nested_simplify(_snake_case ), [[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}]] )
        snake_case : str =text_classifier(['''This is great !''', '''Something else'''], return_all_scores=_snake_case )
        self.assertEqual(
            nested_simplify(_snake_case ), [
                [{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}],
                [{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}],
            ], )
        snake_case : List[Any] =text_classifier(['''This is great !''', '''Something else'''], return_all_scores=_snake_case )
        self.assertEqual(
            nested_simplify(_snake_case ), [
                {'''label''': '''LABEL_0''', '''score''': 0.504},
                {'''label''': '''LABEL_0''', '''score''': 0.504},
            ], )
    @require_torch
    def __snake_case ( self : Union[str, Any] ):
        '''Pipeline accepts an explicit ``torch.device`` and still works on CPU.'''
        import torch

        snake_case : int =pipeline(
            task='''text-classification''', model='''hf-internal-testing/tiny-random-distilbert''', framework='''pt''', device=torch.device('''cpu''' ), )
        snake_case : Tuple =text_classifier('''This is great !''' )
        self.assertEqual(nested_simplify(_snake_case ), [{'''label''': '''LABEL_0''', '''score''': 0.504}] )
    @require_tf
    def __snake_case ( self : Optional[Any] ):
        '''Same tiny-checkpoint smoke test with the TensorFlow backend.'''
        snake_case : Union[str, Any] =pipeline(
            task='''text-classification''', model='''hf-internal-testing/tiny-random-distilbert''', framework='''tf''' )
        snake_case : Optional[int] =text_classifier('''This is great !''' )
        self.assertEqual(nested_simplify(_snake_case ), [{'''label''': '''LABEL_0''', '''score''': 0.504}] )
    @slow
    @require_torch
    def __snake_case ( self : List[Any] ):
        '''Default (full-size) PyTorch sentiment model returns POSITIVE/NEGATIVE.'''
        snake_case : str =pipeline('''text-classification''' )
        snake_case : List[str] =text_classifier('''This is great !''' )
        self.assertEqual(nested_simplify(_snake_case ), [{'''label''': '''POSITIVE''', '''score''': 1.0}] )
        snake_case : List[Any] =text_classifier('''This is bad !''' )
        self.assertEqual(nested_simplify(_snake_case ), [{'''label''': '''NEGATIVE''', '''score''': 1.0}] )
        snake_case : List[Any] =text_classifier('''Birds are a type of animal''' )
        self.assertEqual(nested_simplify(_snake_case ), [{'''label''': '''POSITIVE''', '''score''': 0.988}] )
    @slow
    @require_tf
    def __snake_case ( self : Optional[Any] ):
        '''Default (full-size) TensorFlow sentiment model returns POSITIVE/NEGATIVE.'''
        snake_case : Optional[Any] =pipeline('''text-classification''', framework='''tf''' )
        snake_case : Optional[Any] =text_classifier('''This is great !''' )
        self.assertEqual(nested_simplify(_snake_case ), [{'''label''': '''POSITIVE''', '''score''': 1.0}] )
        snake_case : Any =text_classifier('''This is bad !''' )
        self.assertEqual(nested_simplify(_snake_case ), [{'''label''': '''NEGATIVE''', '''score''': 1.0}] )
        snake_case : str =text_classifier('''Birds are a type of animal''' )
        self.assertEqual(nested_simplify(_snake_case ), [{'''label''': '''POSITIVE''', '''score''': 0.988}] )
    # NOTE(review): the two signatures below repeat the mangled parameter name
    # ``_snake_case`` -- duplicate argument names are a SyntaxError in Python;
    # the original parameter names (model, tokenizer, processor / pipeline,
    # examples) were apparently lost in a mechanical rename.
    def __snake_case ( self : Optional[Any], _snake_case : Optional[Any], _snake_case : Optional[Any], _snake_case : Tuple ):
        '''Build a pipeline and sample inputs for the shared pipeline test mixin.'''
        snake_case : Optional[Any] =TextClassificationPipeline(model=_snake_case, tokenizer=_snake_case )
        return text_classifier, ["HuggingFace is in", "This is another test"]
    def __snake_case ( self : Tuple, _snake_case : Union[str, Any], _snake_case : Optional[Any] ):
        '''Run the pipeline built above: single input, batch, ``top_k=None``,
        dict text/text_pair input, and the (deprecated) nested-pair format.'''
        snake_case : int =text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        snake_case : int ='''HuggingFace is in'''
        snake_case : Optional[int] =text_classifier(_snake_case )
        self.assertEqual(nested_simplify(_snake_case ), [{'''label''': ANY(_snake_case ), '''score''': ANY(_snake_case )}] )
        self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
        snake_case : Optional[int] =['''HuggingFace is in ''', '''Paris is in France''']
        snake_case : int =text_classifier(_snake_case )
        self.assertEqual(
            nested_simplify(_snake_case ), [{'''label''': ANY(_snake_case ), '''score''': ANY(_snake_case )}, {'''label''': ANY(_snake_case ), '''score''': ANY(_snake_case )}], )
        self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
        self.assertTrue(outputs[1]['''label'''] in model.config.idalabel.values() )
        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        snake_case : Tuple =text_classifier(_snake_case, top_k=_snake_case )
        snake_case : Any =len(model.config.idalabel.values() )
        self.assertEqual(
            nested_simplify(_snake_case ), [[{'''label''': ANY(_snake_case ), '''score''': ANY(_snake_case )}] * N, [{'''label''': ANY(_snake_case ), '''score''': ANY(_snake_case )}] * N], )
        snake_case : Optional[Any] ={'''text''': '''HuggingFace is in ''', '''text_pair''': '''Paris is in France'''}
        snake_case : Dict =text_classifier(_snake_case )
        self.assertEqual(
            nested_simplify(_snake_case ), {'''label''': ANY(_snake_case ), '''score''': ANY(_snake_case )}, )
        self.assertTrue(outputs['''label'''] in model.config.idalabel.values() )
        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        snake_case : Optional[Any] =[['''HuggingFace is in ''', '''Paris is in France''']]
        with self.assertRaises(_snake_case ):
            text_classifier(_snake_case )
        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        snake_case : Optional[Any] =text_classifier([[['''HuggingFace is in ''', '''Paris is in France''']]] )
        self.assertEqual(
            nested_simplify(_snake_case ), [{'''label''': ANY(_snake_case ), '''score''': ANY(_snake_case )}], )
        self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
| 136 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
class lowerCAmelCase_ :
    """A node of a binary tree holding an integer payload.

    Fix: the original ``__init__`` referenced an undefined name ``value`` and
    assigned to local variables instead of instance attributes, so creating a
    node raised ``NameError`` and ``depth_first_search`` (which reads
    ``node.value`` / ``node.left`` / ``node.right``) could never work.
    """

    def __init__(self, _snake_case: int) -> None:
        # Payload of this node; children start empty and are attached by the
        # caller while the tree is being built.
        self.value = _snake_case
        self.left: Node | None = None
        self.right: Node | None = None
class lowerCAmelCase_ :
    """Iterable wrapper around a binary tree that yields a single value: the
    sum of all node values reachable from the root.

    Fixes: the original ``__init__`` referenced an undefined name ``tree``;
    the DFS helper was defined under a mangled name (``__snake_case``) while
    both its own recursion and ``__iter__`` call ``depth_first_search``; and
    its body referenced an undefined ``node`` instead of its parameter.
    """

    def __init__(self, _snake_case: "Node") -> None:
        # Root node of the tree whose values will be summed.
        self.tree = _snake_case

    def depth_first_search(self, node: "Node | None") -> int:
        """Return the sum of ``value`` over the subtree rooted at ``node``."""
        if node is None:
            # An empty subtree contributes nothing to the sum.
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> "Iterator[int]":
        # Single-element iterator: the total of the whole tree.
        yield self.depth_first_search(self.tree)
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 136 | 1 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
_lowercase : Tuple =logging.get_logger(__name__)
# Map from the ``--lr_scheduler`` CLI choice to the factory that builds the
# learning-rate schedule; warmup/total steps are filled in by the trainer.
# NOTE(review): both module constants are bound to the same mangled name
# ``_lowercase`` (the dict shadows the logger), while the trainer below
# references ``logger`` and ``arg_to_scheduler`` -- the original names were
# lost in a mechanical rename; verify against the upstream example.
_lowercase : List[str] ={
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}
class snake_case__ (A__ ):
    """Sequence-to-sequence trainer used by the legacy FSMT examples.

    Extends the base ``Trainer`` with label smoothing, a pluggable LR
    scheduler (see the ``arg_to_scheduler`` map above), an optional
    sortish/distributed train sampler, and a generation-aware
    ``prediction_step``.

    NOTE(review): many signatures below repeat the mangled parameter name
    ``__lowercase`` -- duplicate argument names are a SyntaxError in Python,
    and several bodies reference names (``config``, ``data_args``, ``model``,
    ``inputs``, ``labels``...) that were evidently the original parameter
    names before a mechanical rename; confirm against the upstream
    ``Seq2SeqTrainer``.
    """
    def __init__( self , __lowercase=None , __lowercase=None , *__lowercase , **__lowercase ) -> Optional[Any]:
        """Store the model config and data args, then select the loss
        function: plain ``CrossEntropyLoss`` or label-smoothed NLL."""
        super().__init__(*__lowercase , **__lowercase )

        if config is None:
            assert isinstance(self.model , __lowercase ), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                F''' {self.model.__class__}'''
            )
            a__ : Optional[Any] = self.model.config
        else:
            a__ : Optional[Any] = config

        a__ : List[str] = data_args
        a__ : str = self.config.tgt_vocab_size if isinstance(self.config , __lowercase ) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                F'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
                """ padding..""" )

        if self.args.label_smoothing == 0:
            a__ : int = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            a__ : Union[str, Any] = label_smoothed_nll_loss
    def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> Tuple:
        """Create optimizer (AdamW or Adafactor, weight decay excluded for
        bias/LayerNorm params) and LR scheduler if not already set."""
        if self.optimizer is None:
            a__ : Dict = ["""bias""", """LayerNorm.weight"""]
            a__ : Optional[Any] = [
                {
                    """params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
                    """weight_decay""": self.args.weight_decay,
                },
                {
                    """params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
                    """weight_decay""": 0.0,
                },
            ]
            a__ : Tuple = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                a__ : Any = Adafactor
                a__ : Optional[Any] = {"""scale_parameter""": False, """relative_step""": False}
            else:
                a__ : int = AdamW
                a__ : Tuple = {
                    """betas""": (self.args.adam_betaa, self.args.adam_betaa),
                    """eps""": self.args.adam_epsilon,
                }
            a__ : Union[str, Any] = self.args.learning_rate
            if self.sharded_ddp:
                # fairscale's OSS shards optimizer state across data-parallel workers.
                a__ : List[str] = OSS(
                    params=__lowercase , optim=__lowercase , **__lowercase , )
            else:
                a__ : Any = optimizer_cls(__lowercase , **__lowercase )

        if self.lr_scheduler is None:
            a__ : int = self._get_lr_scheduler(__lowercase )
        else:  # ignoring --lr_scheduler
            logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" )
    def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> Optional[int]:
        """Build the LR schedule selected by ``--lr_scheduler``; constant
        variants take fewer arguments than the warmup+decay ones."""
        a__ : List[str] = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            a__ : Optional[Any] = schedule_func(self.optimizer )
        elif self.args.lr_scheduler == "constant_w_warmup":
            a__ : Tuple = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
        else:
            a__ : int = schedule_func(
                self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=__lowercase )
        return scheduler
    def SCREAMING_SNAKE_CASE__( self ) -> Optional[torch.utils.data.Sampler]:
        """Pick the train sampler: none for iterable datasets, TPU sampler on
        TPU, optional sortish sampler, else random/distributed."""
        if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset )
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )

            return (
                RandomSampler(self.train_dataset )
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset )
            )
    def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase ) -> str:
        """Compute (loss, logits) with either plain cross-entropy (optionally
        ignoring pad tokens) or label-smoothed NLL."""
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                a__ : Tuple = model(**__lowercase , use_cache=__lowercase )[0]
                a__ : Union[str, Any] = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
            else:
                # compute usual loss via models
                a__ , a__ : int = model(**__lowercase , labels=__lowercase , use_cache=__lowercase )[:2]
        else:
            # compute label smoothed loss
            a__ : Optional[Any] = model(**__lowercase , use_cache=__lowercase )[0]
            a__ : List[str] = torch.nn.functional.log_softmax(__lowercase , dim=-1 )
            a__ , a__ : List[str] = self.loss_fn(__lowercase , __lowercase , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
        return loss, logits
    def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase ) -> Optional[int]:
        """Trainer hook: pop labels from the inputs and return the loss."""
        a__ : int = inputs.pop("""labels""" )
        a__ , a__ : Any = self._compute_loss(__lowercase , __lowercase , __lowercase )
        return loss
    def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase , __lowercase = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        """Evaluation step: optionally generate (beam search) instead of
        teacher forcing, pad generations/labels to max_length, return
        (loss, logits-or-generations, labels)."""
        a__ : Any = self._prepare_inputs(__lowercase )

        a__ : Optional[int] = {
            """max_length""": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            """num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            a__ : Any = self.model.generate(
                inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **__lowercase , )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                a__ : Any = self._pad_tensors_to_max_len(__lowercase , gen_kwargs["""max_length"""] )

        a__ : Any = inputs.pop("""labels""" )
        with torch.no_grad():
            # compute loss on predict data
            a__ , a__ : List[Any] = self._compute_loss(__lowercase , __lowercase , __lowercase )

        a__ : List[Any] = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        a__ : List[str] = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            a__ : str = self._pad_tensors_to_max_len(__lowercase , gen_kwargs["""max_length"""] )

        return (loss, logits, labels)
    def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase ) -> Any:
        """Right-pad ``tensor`` with the pad (or EOS) token id so its last
        dimension equals ``max_length``; raises if neither id is defined."""
        a__ : Any = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                """Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"""
                F''' padded to `max_length`={max_length}''' )

        a__ : Union[str, Any] = pad_token_id * torch.ones(
            (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
        a__ : List[str] = tensor
        return padded_tensor
| 136 |
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
_lowercase : List[str] ="platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class snake_case__ :
    """Model tester: builds a tiny ``PegasusConfig`` plus random inputs and
    provides the shared decoder-cache equivalence checks used by the Flax
    Pegasus tests below.

    NOTE(review): the ``__init__`` signature repeats the mangled parameter
    name ``__lowercase`` -- duplicate argument names are a SyntaxError, and
    the body references the intended names (parent, batch_size, seq_length,
    ...); confirm against the upstream test module.
    """
    __lowerCAmelCase :Tuple = PegasusConfig
    __lowerCAmelCase :List[str] = {}
    __lowerCAmelCase :Optional[Any] = "gelu"
    def __init__( self , __lowercase , __lowercase=1_3 , __lowercase=7 , __lowercase=True , __lowercase=False , __lowercase=9_9 , __lowercase=3_2 , __lowercase=5 , __lowercase=4 , __lowercase=3_7 , __lowercase=0.1 , __lowercase=0.1 , __lowercase=2_0 , __lowercase=2 , __lowercase=1 , __lowercase=0 , ) -> Any:
        """Store the (tiny) model hyper-parameters used by the tests."""
        a__ : str = parent
        a__ : List[str] = batch_size
        a__ : Optional[int] = seq_length
        a__ : Tuple = is_training
        a__ : List[str] = use_labels
        a__ : Dict = vocab_size
        a__ : Tuple = hidden_size
        a__ : Union[str, Any] = num_hidden_layers
        a__ : List[str] = num_attention_heads
        a__ : List[str] = intermediate_size
        a__ : Union[str, Any] = hidden_dropout_prob
        a__ : int = attention_probs_dropout_prob
        a__ : List[str] = max_position_embeddings
        a__ : Optional[int] = eos_token_id
        a__ : List[str] = pad_token_id
        a__ : Optional[Any] = bos_token_id
    def SCREAMING_SNAKE_CASE__( self ) -> Tuple:
        """Build a tiny config plus random encoder/decoder inputs (an EOS
        token is appended to every encoder sequence)."""
        a__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
        a__ : Union[str, Any] = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
        a__ : List[str] = np.concatenate([input_ids, eos_tensor] , axis=1 )
        a__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        a__ : List[Any] = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        a__ : str = prepare_pegasus_inputs_dict(__lowercase , __lowercase , __lowercase )
        return config, inputs_dict
    def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase ) -> List[str]:
        """Decoding step-by-step with an init_cache'd decoder must match the
        full (uncached) forward pass on the last position."""
        a__ : Optional[Any] = 2_0
        a__ : Union[str, Any] = model_class_name(__lowercase )

        a__ : Dict = model.encode(inputs_dict["""input_ids"""] )

        a__ , a__ : str = (
            inputs_dict["""decoder_input_ids"""],
            inputs_dict["""decoder_attention_mask"""],
        )

        a__ : Union[str, Any] = model.init_cache(decoder_input_ids.shape[0] , __lowercase , __lowercase )
        a__ : int = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )

        a__ : Tuple = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        a__ : Optional[int] = model.decode(
            decoder_input_ids[:, :-1] , __lowercase , decoder_attention_mask=__lowercase , past_key_values=__lowercase , decoder_position_ids=__lowercase , )

        a__ : List[str] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
        a__ : Optional[Any] = model.decode(
            decoder_input_ids[:, -1:] , __lowercase , decoder_attention_mask=__lowercase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__lowercase , )

        a__ : int = model.decode(__lowercase , __lowercase )

        a__ : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
    def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase ) -> Optional[int]:
        """Same cached-vs-uncached check, but with an explicit (padded)
        decoder attention mask."""
        a__ : int = 2_0
        a__ : Optional[int] = model_class_name(__lowercase )

        a__ : Optional[Any] = model.encode(inputs_dict["""input_ids"""] )

        a__ , a__ : Dict = (
            inputs_dict["""decoder_input_ids"""],
            inputs_dict["""decoder_attention_mask"""],
        )

        a__ : Union[str, Any] = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )

        a__ : Dict = model.init_cache(decoder_input_ids.shape[0] , __lowercase , __lowercase )
        a__ : Optional[Any] = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )

        a__ : List[str] = model.decode(
            decoder_input_ids[:, :-1] , __lowercase , decoder_attention_mask=__lowercase , past_key_values=__lowercase , decoder_position_ids=__lowercase , )
        a__ : Optional[int] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
        a__ : Optional[int] = model.decode(
            decoder_input_ids[:, -1:] , __lowercase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__lowercase , decoder_position_ids=__lowercase , )

        a__ : Union[str, Any] = model.decode(__lowercase , __lowercase , decoder_attention_mask=__lowercase )

        a__ : List[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    """Build the input dict consumed by the Flax Pegasus models under test.

    Fixes: the original ``def`` repeated the mangled parameter name
    ``_lowercase`` five times (a SyntaxError) while the body and the call
    site in the model tester use the real names restored here; it also
    referenced the non-existent ``np.inta`` instead of ``np.int8``.

    Missing masks are derived from the token ids: positions equal to
    ``config.pad_token_id`` are masked out, except the first decoder position
    which is always attended to.
    """
    if attention_mask is None:
        # Attend to every non-padding encoder token.
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }


# Backward-compatible alias for the name this function was previously
# defined under.
lowerCAmelCase_ = prepare_pegasus_inputs_dict
@require_flax
class snake_case__ (A__ , unittest.TestCase ):
    """Flax Pegasus model tests: decoder cache equivalence, jitted
    encode/decode, PT->Flax checkpoint loading, and a slow XSum
    summarization integration test.

    NOTE(review): ``setUp`` below instantiates ``FlaxPegasusModelTester``,
    but the tester class above was renamed by the obfuscation -- confirm the
    original class name before running.
    """
    __lowerCAmelCase :Optional[Any] = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    __lowerCAmelCase :List[Any] = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    __lowerCAmelCase :List[Any] = True
    __lowerCAmelCase :str = False
    __lowerCAmelCase :List[str] = False
    __lowerCAmelCase :Any = False
    def SCREAMING_SNAKE_CASE__( self ) -> Dict:
        """Create the model tester and the config tester."""
        a__ : Optional[int] = FlaxPegasusModelTester(self )
        a__ : Tuple = ConfigTester(self , config_class=__lowercase )
    def SCREAMING_SNAKE_CASE__( self ) -> List[Any]:
        """Run the common ``PegasusConfig`` sanity checks."""
        self.config_tester.run_common_tests()
    def SCREAMING_SNAKE_CASE__( self ) -> Optional[int]:
        """Cached decoding must match uncached decoding for every model class."""
        a__ , a__ : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(__lowercase , __lowercase , __lowercase )
    def SCREAMING_SNAKE_CASE__( self ) -> List[str]:
        """Same as above, with an explicit decoder attention mask."""
        a__ , a__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(__lowercase , __lowercase , __lowercase )
    def SCREAMING_SNAKE_CASE__( self ) -> Tuple:
        """``model.encode`` must produce identically-shaped outputs with and
        without JIT compilation."""
        a__ , a__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                a__ : Optional[int] = self._prepare_for_class(__lowercase , __lowercase )
                a__ : Tuple = model_class(__lowercase )

                @jax.jit
                def encode_jitted(__lowercase , __lowercase=None , **__lowercase ):
                    return model.encode(input_ids=__lowercase , attention_mask=__lowercase )

                with self.subTest("""JIT Enabled""" ):
                    a__ : Any = encode_jitted(**__lowercase ).to_tuple()

                with self.subTest("""JIT Disabled""" ):
                    with jax.disable_jit():
                        a__ : Tuple = encode_jitted(**__lowercase ).to_tuple()

                self.assertEqual(len(__lowercase ) , len(__lowercase ) )
                for jitted_output, output in zip(__lowercase , __lowercase ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def SCREAMING_SNAKE_CASE__( self ) -> Union[str, Any]:
        """``model.decode`` must produce identically-shaped outputs with and
        without JIT compilation."""
        a__ , a__ : int = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                a__ : int = model_class(__lowercase )
                a__ : Optional[Any] = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )

                a__ : List[str] = {
                    """decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
                    """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
                    """encoder_outputs""": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(__lowercase , __lowercase , __lowercase ):
                    return model.decode(
                        decoder_input_ids=__lowercase , decoder_attention_mask=__lowercase , encoder_outputs=__lowercase , )

                with self.subTest("""JIT Enabled""" ):
                    a__ : Any = decode_jitted(**__lowercase ).to_tuple()

                with self.subTest("""JIT Disabled""" ):
                    with jax.disable_jit():
                        a__ : int = decode_jitted(**__lowercase ).to_tuple()

                self.assertEqual(len(__lowercase ) , len(__lowercase ) )
                for jitted_output, output in zip(__lowercase , __lowercase ):
                    self.assertEqual(jitted_output.shape , output.shape )
    @slow
    def SCREAMING_SNAKE_CASE__( self ) -> Optional[Any]:
        """Loading pegasus-large from a PyTorch checkpoint must succeed and
        accept a minimal dummy input."""
        for model_class_name in self.all_model_classes:
            a__ : Any = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=__lowercase )
            a__ : Optional[int] = np.ones((1, 1) )
            a__ : Union[str, Any] = model(__lowercase )
            self.assertIsNotNone(__lowercase )
    @slow
    def SCREAMING_SNAKE_CASE__( self ) -> Optional[int]:
        """End-to-end summarization with pegasus-xsum: beam-search outputs
        must match the reference summaries."""
        a__ : Union[str, Any] = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" )
        a__ : Optional[int] = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" )

        a__ : List[Any] = [
            """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
            """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
        ]

        a__ : Optional[Any] = [
            """California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
            """Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
        ]

        a__ : Union[str, Any] = tokenizer(__lowercase , return_tensors="""np""" , truncation=__lowercase , max_length=5_1_2 , padding=__lowercase )
        a__ : Union[str, Any] = model.generate(**__lowercase , num_beams=2 ).sequences
        a__ : Union[str, Any] = tokenizer.batch_decode(__lowercase , skip_special_tokens=__lowercase )
        assert tgt_text == decoded
| 136 | 1 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
a :int = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
a :List[str] = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class __a (unittest.TestCase):
    '''Tests for ``check_copies`` (``find_code_in_diffusers`` /
    ``is_copy_consistent``), run against a temporary copy of
    ``scheduling_ddpm.py`` so the real source tree is never touched.

    NOTE(review): method names and several identifiers below were mangled
    (e.g. all methods are ``_a``, one signature repeats ``_a`` as a parameter
    name, which is a SyntaxError; ``__UpperCamelCase`` and
    ``black.TargetVersion.PYaa`` are unresolved) -- confirm against the
    upstream ``test_check_copies`` module.
    '''
    def _a ( self ) -> int:
        """setUp: create a temp diffusers tree and copy the DDPM scheduler into it."""
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir , """schedulers/""" ) )
        SCREAMING_SNAKE_CASE__ : Tuple = self.diffusers_dir
        shutil.copy(
            os.path.join(__UpperCamelCase , """src/diffusers/schedulers/scheduling_ddpm.py""" ) , os.path.join(self.diffusers_dir , """schedulers/scheduling_ddpm.py""" ) , )
    def _a ( self ) -> str:
        """tearDown: restore the default source dir and delete the temp tree."""
        SCREAMING_SNAKE_CASE__ : List[str] = """src/diffusers"""
        shutil.rmtree(self.diffusers_dir )
    def _a ( self , _a , _a , _a , _a=None ) -> List[Any]:
        """Write a class built from ``comment`` + ``class_code`` (black-formatted)
        into the temp tree and assert the copy checker accepts it, or -- when
        an overwrite result is given -- that it rewrites it to that result."""
        SCREAMING_SNAKE_CASE__ : str = comment + f'''\nclass {class_name}(nn.Module):\n''' + class_code
        if overwrite_result is not None:
            SCREAMING_SNAKE_CASE__ : str = comment + f'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
        SCREAMING_SNAKE_CASE__ : Any = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
        SCREAMING_SNAKE_CASE__ : List[Any] = black.format_str(__UpperCamelCase , mode=__UpperCamelCase )
        SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.join(self.diffusers_dir , """new_code.py""" )
        with open(__UpperCamelCase , """w""" , newline="""\n""" ) as f:
            f.write(__UpperCamelCase )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(__UpperCamelCase ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name , overwrite=__UpperCamelCase )
            with open(__UpperCamelCase , """r""" ) as f:
                self.assertTrue(f.read() , __UpperCamelCase )
    def _a ( self ) -> Dict:
        """The reference DDPMSchedulerOutput code must be found in the tree."""
        SCREAMING_SNAKE_CASE__ : List[Any] = check_copies.find_code_in_diffusers("""schedulers.scheduling_ddpm.DDPMSchedulerOutput""" )
        self.assertEqual(__UpperCamelCase , __UpperCamelCase )
    def _a ( self ) -> Union[str, Any]:
        """Exercise copy consistency: exact copy, no trailing newline, rename
        via ``with DDPM->...``, long class names, and the overwrite path."""
        self.check_copy_consistency(
            """# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""" , """DDPMSchedulerOutput""" , REFERENCE_CODE + """\n""" , )
        # With no empty line at the end
        self.check_copy_consistency(
            """# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""" , """DDPMSchedulerOutput""" , __UpperCamelCase , )
        # Copy consistency with rename
        self.check_copy_consistency(
            """# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""" , """TestSchedulerOutput""" , re.sub("""DDPM""" , """Test""" , __UpperCamelCase ) , )
        # Copy consistency with a really long name
        SCREAMING_SNAKE_CASE__ : int = """TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
        self.check_copy_consistency(
            f'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}''' , f'''{long_class_name}SchedulerOutput''' , re.sub("""Bert""" , __UpperCamelCase , __UpperCamelCase ) , )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            """# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""" , """TestSchedulerOutput""" , __UpperCamelCase , overwrite_result=re.sub("""DDPM""" , """Test""" , __UpperCamelCase ) , )
# ---- concatenation residue (was: "| 718 |") ----
"""simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class __a :
    '''Builds tiny UMT5 configs and random inputs for the model tests below.

    NOTE(review): obfuscation renamed almost every method to `_a`, so at class
    creation the later definitions shadow the earlier ones, and calls such as
    `self.get_config()` / `self.prepare_inputs_dict(...)` reference methods that
    no longer exist. The attribute assignments also reference the intended (but
    now free/undefined) parameter names — restore real names before relying on
    this class.
    '''
    def __init__( self , _a , _a=99 , _a=13 , _a=7 , _a=9 , _a=True , _a=True , _a=False , _a=32 , _a=5 , _a=4 , _a=37 , _a=8 , _a=0.1 , _a=0.002 , _a=1 , _a=0 , _a=0 , _a=None , _a=None , ) -> Any:
        """Store the tiny model hyper-parameters used by every test.

        NOTE(review): every parameter is named `_a`, so only the last binding is
        visible; the intended parameter names can be read off the right-hand
        sides of the assignments below.
        """
        SCREAMING_SNAKE_CASE__ : Dict = parent
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_size
        SCREAMING_SNAKE_CASE__ : Tuple = encoder_seq_length
        SCREAMING_SNAKE_CASE__ : str = decoder_seq_length
        # For common tests
        SCREAMING_SNAKE_CASE__ : Optional[int] = self.decoder_seq_length
        SCREAMING_SNAKE_CASE__ : Tuple = is_training
        SCREAMING_SNAKE_CASE__ : Dict = use_attention_mask
        SCREAMING_SNAKE_CASE__ : List[str] = use_labels
        SCREAMING_SNAKE_CASE__ : str = vocab_size
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_size
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_hidden_layers
        SCREAMING_SNAKE_CASE__ : Any = num_attention_heads
        SCREAMING_SNAKE_CASE__ : Any = d_ff
        SCREAMING_SNAKE_CASE__ : Any = relative_attention_num_buckets
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = dropout_rate
        SCREAMING_SNAKE_CASE__ : List[str] = initializer_factor
        SCREAMING_SNAKE_CASE__ : List[Any] = eos_token_id
        SCREAMING_SNAKE_CASE__ : List[str] = pad_token_id
        SCREAMING_SNAKE_CASE__ : Any = decoder_start_token_id
        SCREAMING_SNAKE_CASE__ : Any = None
        SCREAMING_SNAKE_CASE__ : str = decoder_layers
    def _a ( self ) -> Tuple:
        """Full-size checkpoint config (used for pipeline-style tests)."""
        return TaConfig.from_pretrained("""google/umt5-base""" )
    def _a ( self , _a , _a , _a , _a=None , _a=None , _a=None , _a=None , _a=None , ) -> Any:
        """Assemble the model-call kwargs, defaulting any mask not supplied.

        NOTE(review): parameters are all `_a`; the body references the intended
        names (config, input_ids, decoder_input_ids, attention masks, head masks).
        """
        if attention_mask is None:
            SCREAMING_SNAKE_CASE__ : List[str] = input_ids.ne(config.pad_token_id )
        if decoder_attention_mask is None:
            SCREAMING_SNAKE_CASE__ : int = decoder_input_ids.ne(config.pad_token_id )
        if head_mask is None:
            SCREAMING_SNAKE_CASE__ : str = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=_a )
        if decoder_head_mask is None:
            SCREAMING_SNAKE_CASE__ : List[str] = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=_a )
        if cross_attn_head_mask is None:
            SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.ones(
                config.num_decoder_layers , config.num_attention_heads , device=_a )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }
    def _a ( self ) -> Tuple:
        """Build a (config, inputs_dict) pair with random token ids."""
        SCREAMING_SNAKE_CASE__ : Tuple = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
        SCREAMING_SNAKE_CASE__ : Optional[int] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        SCREAMING_SNAKE_CASE__ : Tuple = input_ids.clamp(self.pad_token_id + 1 )
        SCREAMING_SNAKE_CASE__ : Optional[int] = decoder_input_ids.clamp(self.pad_token_id + 1 )
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_config()
        SCREAMING_SNAKE_CASE__ : List[str] = config.num_attention_heads
        SCREAMING_SNAKE_CASE__ : Optional[int] = self.prepare_inputs_dict(_a , _a , _a )
        return config, input_dict
    def _a ( self ) -> Any:
        """Common-test entry point: delegate to prepare_config_and_inputs."""
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = self.prepare_config_and_inputs()
        return config, inputs_dict
    def _a ( self ) -> List[str]:
        """Tiny config with an enlarged vocab (pipeline tests need extra tokens)."""
        return TaConfig(
            vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def _a ( self ) -> List[Any]:
        """Tiny config built from the tester's own hyper-parameters."""
        return TaConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def _a ( self , _a , _a , _a , _a , _a , _a , ) -> Optional[Any]:
        """Forward-pass sanity checks: output shapes and past-key-values layout."""
        SCREAMING_SNAKE_CASE__ : str = UMTaModel(config=_a )
        model.to(_a )
        model.eval()
        SCREAMING_SNAKE_CASE__ : Dict = model(
            input_ids=_a , decoder_input_ids=_a , attention_mask=_a , decoder_attention_mask=_a , )
        SCREAMING_SNAKE_CASE__ : Optional[Any] = model(input_ids=_a , decoder_input_ids=_a )
        SCREAMING_SNAKE_CASE__ : Optional[Any] = result.last_hidden_state
        SCREAMING_SNAKE_CASE__ : Dict = result.past_key_values
        SCREAMING_SNAKE_CASE__ : Any = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
        self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(_a ) , config.num_layers )
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0] ) , 4 )
    def _a ( self , _a , _a , _a , _a , _a , _a , ) -> Union[str, Any]:
        """Decoder-only caching check: with/without past must agree on a slice."""
        SCREAMING_SNAKE_CASE__ : int = UMTaModel(config=_a ).get_decoder().to(_a ).eval()
        # first forward pass
        SCREAMING_SNAKE_CASE__ : str = model(_a , use_cache=_a )
        SCREAMING_SNAKE_CASE__ : str = model(_a )
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_a , use_cache=_a )
        self.parent.assertTrue(len(_a ) == len(_a ) )
        self.parent.assertTrue(len(_a ) == len(_a ) + 1 )
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor((self.batch_size, 1) , config.vocab_size )
        # append to next input_ids and
        SCREAMING_SNAKE_CASE__ : Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1 )
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_a )["""last_hidden_state"""]
        SCREAMING_SNAKE_CASE__ : Tuple = model(_a , past_key_values=_a )["""last_hidden_state"""]
        # select random slice
        SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        SCREAMING_SNAKE_CASE__ : Optional[Any] = output_from_no_past[:, -1, random_slice_idx].detach()
        SCREAMING_SNAKE_CASE__ : List[Any] = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(_a , _a , atol=1E-3 ) )
    def _a ( self , _a , _a , ) -> List[Any]:
        """Half-precision forward pass must not produce NaNs."""
        SCREAMING_SNAKE_CASE__ : int = UMTaModel(config=_a ).to(_a ).half().eval()
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(**_a )["""last_hidden_state"""]
        self.parent.assertFalse(torch.isnan(_a ).any().item() )
@require_torch
class __a (UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase):
    '''Model-level unit tests for UMT5 (driven by the tester class above).

    NOTE(review): the mixin bases are the undefined name `UpperCamelCase_`
    (obfuscated from the imported ModelTesterMixin / GenerationTesterMixin /
    PipelineTesterMixin), the class attributes below were renamed from their
    mixin-required names (e.g. `all_model_classes`, `pipeline_model_mapping`) to
    `_SCREAMING_SNAKE_CASE`, and all test methods collide on the name `_a` —
    restore real names before these tests can run.
    '''
    # NOTE(review): presumably `all_model_classes` upstream — TODO confirm.
    _SCREAMING_SNAKE_CASE :Union[str, Any] = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    _SCREAMING_SNAKE_CASE :Optional[int] = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    _SCREAMING_SNAKE_CASE :List[str] = (
        {
            """conversational""": UMTaForConditionalGeneration,
            """feature-extraction""": UMTaModel,
            """summarization""": UMTaForConditionalGeneration,
            """text2text-generation""": UMTaForConditionalGeneration,
            """translation""": UMTaForConditionalGeneration,
            """question-answering""": UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    _SCREAMING_SNAKE_CASE :Union[str, Any] = True
    _SCREAMING_SNAKE_CASE :Tuple = False
    _SCREAMING_SNAKE_CASE :Optional[Any] = False
    _SCREAMING_SNAKE_CASE :List[Any] = True
    _SCREAMING_SNAKE_CASE :List[str] = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    _SCREAMING_SNAKE_CASE :Union[str, Any] = [0.8, 0.9]
    def _a ( self ) -> Optional[int]:
        """setUp: create the shared model tester (name mangled from `setUp`)."""
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = UMTaModelTester(self )
    @unittest.skip("""Test has a segmentation fault on torch 1.8.0""" )
    def _a ( self ) -> Any:
        """Export the tiny model to ONNX (skipped; segfaults on torch 1.8.0)."""
        SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE__ : Dict = UMTaModel(config_and_inputs[0] ).to(_a )
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                _a , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f'''{tmpdirname}/t5_test.onnx''' , export_params=_a , opset_version=9 , input_names=["""input_ids""", """decoder_input_ids"""] , )
    @unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
    def _a ( self ) -> str:
        """fp16 forward pass (GPU only)."""
        SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fpaa_forward(*_a )
    def _a ( self ) -> List[str]:
        """Head masking: with a fully-zero mask, attention weights must be zero."""
        SCREAMING_SNAKE_CASE__ : int = ["""encoder_attentions""", """decoder_attentions""", """cross_attentions"""]
        SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE__ : List[Any] = config_and_inputs[0]
        SCREAMING_SNAKE_CASE__ : Tuple = UMTaForConditionalGeneration(_a ).eval()
        model.to(_a )
        SCREAMING_SNAKE_CASE__ : List[str] = {
            """head_mask""": torch.zeros(config.num_layers , config.num_heads , device=_a ),
            """decoder_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=_a ),
            """cross_attn_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=_a ),
        }
        for attn_name, (name, mask) in zip(_a , head_masking.items() ):
            SCREAMING_SNAKE_CASE__ : List[str] = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                SCREAMING_SNAKE_CASE__ : str = torch.ones(
                    config.num_decoder_layers , config.num_heads , device=_a )
            SCREAMING_SNAKE_CASE__ : Optional[Any] = model.generate(
                config_and_inputs[1]["""input_ids"""] , num_beams=1 , max_length=3 , output_attentions=_a , return_dict_in_generate=_a , **_a , )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            SCREAMING_SNAKE_CASE__ : List[str] = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
    @unittest.skip("""Does not work on the tiny model as we keep hitting edge cases.""" )
    def _a ( self ) -> Dict:
        """Disabled common test (tiny model hits edge cases)."""
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class __a (unittest.TestCase):
    '''Slow integration test against the real `google/umt5-small` checkpoint:
    tokenization ids and greedy generations are pinned to recorded values.
    (Currently skipped pending a tokenizer special-token stripping change.)
    '''
    @slow
    @unittest.skip(
        """Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged""" )
    def _a ( self ) -> Union[str, Any]:
        """Tokenize multilingual prompts, compare ids to the recorded tensor,
        then generate and compare the decoded strings.

        NOTE(review): flags such as `return_dict=_a` / `use_fast=_a` /
        `padding=_a` reference the obfuscated (undefined) name `_a`; the
        intended boolean values are not recoverable from this view.
        """
        SCREAMING_SNAKE_CASE__ : Optional[Any] = UMTaForConditionalGeneration.from_pretrained("""google/umt5-small""" , return_dict=_a ).to(_a )
        SCREAMING_SNAKE_CASE__ : str = AutoTokenizer.from_pretrained("""google/umt5-small""" , use_fast=_a , legacy=_a )
        SCREAMING_SNAKE_CASE__ : Optional[Any] = [
            """Bonjour monsieur <extra_id_0> bien <extra_id_1>.""",
            """No se como puedo <extra_id_0>.""",
            """This is the reason why we <extra_id_0> them.""",
            """The <extra_id_0> walks in <extra_id_1>, seats""",
            """A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""",
        ]
        SCREAMING_SNAKE_CASE__ : Tuple = tokenizer(_a , return_tensors="""pt""" , padding=_a ).input_ids
        # fmt: off
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor(
            [
                [ 38_530, 210_703, 256_299, 1_410, 256_298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 826, 321, 671, 25_922, 256_299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 1_460, 339, 312, 19_014, 10_620, 758, 256_299, 2_355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
                [ 517, 256_299, 14_869, 281, 301, 256_298, 275, 119_983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 320, 256_299, 14_869, 281, 2_234, 289, 2_275, 333,61_391, 289, 256_298, 543, 256_297, 168_714, 329, 256_296,274, 1],
            ] )
        # fmt: on
        torch.testing.assert_allclose(_a , _a )
        SCREAMING_SNAKE_CASE__ : Optional[int] = model.generate(input_ids.to(_a ) )
        SCREAMING_SNAKE_CASE__ : int = [
            """<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>""",
            """<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
            """<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
            """<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
            """<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
        ]
        SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.batch_decode(_a )
        self.assertEqual(_a , _a )
# ---- concatenation residue (was: "| 12 | 0 |") ----
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# NOTE(review): restored the standard transformers lazy-init pattern. The
# obfuscated version bound every structure to `a__` (each assignment clobbering
# the previous one), passed the never-defined `_import_structure` to
# `_LazyModule` (a guaranteed NameError at import), never installed the lazy
# module into sys.modules, and digit-mangled the TYPE_CHECKING module/class
# names (`maskaformer`) against the string keys declared here.
_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Image processing needs the vision extras.
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling code needs torch.
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
    import sys

    # Install the lazy module so attribute access triggers the deferred imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# ---- concatenation residue (was: "| 51 |") ----
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# NOTE(review): restored the standard transformers lazy-init pattern. The
# obfuscated version bound both the import map and the torch-only list to `a__`
# (the second assignment clobbered the first), then passed the never-defined
# `_import_structure` to `_LazyModule` (a guaranteed NameError at import) and
# bound the lazy module to a variable instead of installing it in sys.modules.
_import_structure = {
    "configuration_autoformer": [
        "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AutoformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling code needs torch.
    _import_structure["modeling_autoformer"] = [
        "AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AutoformerForPrediction",
        "AutoformerModel",
        "AutoformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_autoformer import (
        AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AutoformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_autoformer import (
            AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            AutoformerForPrediction,
            AutoformerModel,
            AutoformerPreTrainedModel,
        )
else:
    import sys

    # Install the lazy module so attribute access triggers the deferred imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# ---- concatenation residue (was: "| 51 | 1 |") ----
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
# NOTE(review): restored the standard transformers lazy-init pattern. The
# obfuscated version bound every structure to `UpperCAmelCase_` (each
# assignment clobbering the previous), passed the never-defined
# `_import_structure` to `_LazyModule` (a guaranteed NameError at import), and
# never installed the lazy module into sys.modules. The final line also carried
# dataset-join residue (`| 701 | import copy`) that made it a syntax error.
_import_structure = {
    "configuration_audio_spectrogram_transformer": [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ASTConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling code needs torch.
    _import_structure["modeling_audio_spectrogram_transformer"] = [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ASTForAudioClassification",
        "ASTModel",
        "ASTPreTrainedModel",
    ]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # The feature extractor needs the speech extras.
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ASTConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_audio_spectrogram_transformer import (
            AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ASTForAudioClassification,
            ASTModel,
            ASTPreTrainedModel,
        )

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
    import sys

    # Install the lazy module so attribute access triggers the deferred imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)

# NOTE(review): this `import copy` was fused onto the previous line by the
# dataset-join residue; it belongs to the following concatenated snippet
# (which uses `copy.deepcopy`) and is kept so that snippet still works.
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''',
'''BridgeTower/bridgetower-base-itm-mlm''': (
'''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'''
),
}
class BridgeTowerVisionConfig(PretrainedConfig):
    """Configuration for the BridgeTower vision encoder.

    NOTE(review): restored from an obfuscated version in which the class name
    collided with its two siblings (`lowerCAmelCase`), `model_type` was renamed
    away from the attribute `PretrainedConfig` requires, every `__init__`
    parameter was called `lowerCAmelCase__` (shadowing one another) while the
    body referenced the intended names, and the vision `from_pretrained`
    extracted the *text* sub-config.
    """

    # Required by PretrainedConfig for (de)serialization dispatch.
    model_type = "bridgetower_vision_model"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_channels=3,
        patch_size=16,
        image_size=288,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        stop_gradient=False,
        share_layernorm=True,
        remove_last_layer=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load this config, unwrapping the vision sub-config from a full
        BridgeTower checkpoint when necessary."""
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # A full BridgeTower checkpoint nests this config under "vision_config"
        # (the obfuscated version wrongly grabbed "text_config" here).
        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BridgeTowerTextConfig(PretrainedConfig):
    """Configuration for the (RoBERTa-style) text encoder of BridgeTower.

    NOTE(review): restored from an obfuscated version whose class name collided
    with its siblings, whose `model_type` attribute was renamed, and whose
    `__init__` parameters were all called `lowerCAmelCase__` while the body
    referenced the intended names (from which the signature is reconstructed).
    """

    # Required by PretrainedConfig for (de)serialization dispatch.
    model_type = "bridgetower_text_model"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        initializer_factor=1,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load this config, unwrapping the text sub-config from a full
        BridgeTower checkpoint when necessary."""
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # A full BridgeTower checkpoint nests this config under "text_config".
        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BridgeTowerConfig(PretrainedConfig):
    """Composite BridgeTower configuration holding cross-modal settings plus the
    nested text and vision sub-configs.

    NOTE(review): restored from an obfuscated version with a colliding class
    name, all-`lowerCAmelCase__` parameters, attribute assignments to a
    throwaway local `_A`, `**kwargs` passed where the sub-config dicts were
    intended, and a final line turned into a syntax error by dataset-join
    residue (`return output | 476 | 0 |`).
    """

    # Required by PretrainedConfig for (de)serialization dispatch.
    model_type = "bridgetower"

    def __init__(
        self,
        share_cross_modal_transformer_layers=True,
        hidden_act="gelu",
        hidden_size=768,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        share_link_tower_layers=False,
        link_tower_type="add",
        num_attention_heads=12,
        num_hidden_layers=6,
        tie_word_embeddings=False,
        init_layernorm_from_vision_encoder=False,
        text_config=None,
        vision_config=None,
        **kwargs,
    ):
        # TODO: remove this once the Hub files are updated.
        _ = kwargs.pop("text_config_dict", None)
        _ = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder

        if text_config is None:
            text_config = {}
            logger.info('`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.')

        if vision_config is None:
            vision_config = {}
            logger.info('`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.')

        self.text_config = BridgeTowerTextConfig(**text_config)
        self.vision_config = BridgeTowerVisionConfig(**vision_config)

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        """Build a composite config from separate text/vision config objects."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize, expanding the nested sub-configs to plain dicts."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
'''simple docstring'''
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
lowerCamelCase = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    """Arguments describing which GLUE data to feed the model for train/eval.

    NOTE(review): restored from an obfuscated version in which every field was
    bound to the single name `lowerCAmelCase__` with no type annotation (so the
    dataclass ended up with no usable fields), `overwrite_cache` defaulted to
    the undefined name `A`, and `__post_init__` was renamed so task-name
    normalization never ran.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        # Task names are matched case-insensitively against glue_processors keys.
        self.task_name = self.task_name.lower()
class Split(Enum):
    """Dataset split selector; member values are the lowercase split names used
    in cache-file names.

    NOTE(review): restored — the obfuscated version named the class
    `_UpperCamelCase` (colliding with two sibling classes), derived from the
    undefined base `A`, and bound all three members to `lowerCAmelCase__`,
    while the dataset class below looks up `Split[mode]` / `Split.dev` /
    `Split.test`.
    """

    train = "train"
    dev = "dev"
    test = "test"
class GlueDataset(Dataset):
    """Deprecated torch Dataset of GLUE features, built from TSV files (or a
    cached feature file) under a file lock so only one process tokenizes.

    NOTE(review): restored from an obfuscated version in which the class name
    collided with its siblings, the base was the undefined name `A`, the class
    attribute annotations were replaced by `lowerCAmelCase__ = 42`, and every
    `self.*` assignment went to the throwaway local `__lowercase`.
    """

    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
            'library. You can have a look at this example script for pointers: '
            'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py',
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError('mode is not a valid split name')
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + '.lock'
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i):
        return self.features[i]

    def get_labels(self):
        """The (possibly RoBERTa-reordered) label list for this task."""
        return self.label_list
# ---- concatenation residue (was: "| 474 |") ----
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
lowerCamelCase = logging.getLogger(__name__)
lowerCamelCase = tf.data.AUTOTUNE
def parse_args():
    """Parse the command-line flags for the TPU masked-LM training script.

    NOTE(review): restored from an obfuscated version in which every function in
    this file was named `_A` (so only the last definition survived), the parser
    was bound to a throwaway local while the body used the undefined name
    `parser`, every `type=`/`required=` argument was replaced by
    `_lowerCAmelCase`, and the undefined name `args` was returned.
    """
    parser = argparse.ArgumentParser(description='Train a masked language model on TPU.')
    parser.add_argument(
        '--pretrained_model_config',
        type=str,
        default='roberta-base',
        help='The model config to use. Note that we don\'t copy the model\'s weights, only the config!',
    )
    parser.add_argument(
        '--tokenizer',
        type=str,
        default='unigram-tokenizer-wikitext',
        help='The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model\'s vocab size.',
    )
    parser.add_argument(
        '--per_replica_batch_size',
        type=int,
        default=8,
        help='Batch size per TPU core.',
    )
    parser.add_argument(
        '--no_tpu',
        action='store_true',
        help='If set, run on CPU and don\'t try to initialize a TPU. Useful for debugging on non-TPU instances.',
    )
    parser.add_argument(
        '--tpu_name',
        type=str,
        help='Name of TPU resource to initialize. Should be blank on Colab, and \'local\' on TPU VMs.',
        default='local',
    )
    parser.add_argument(
        '--tpu_zone',
        type=str,
        help='Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.',
    )
    parser.add_argument(
        '--gcp_project', type=str, help='Google cloud project name. Only used for non-Colab TPU nodes.'
    )
    parser.add_argument(
        '--bfloat16',
        action='store_true',
        help='Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.',
    )
    parser.add_argument(
        '--train_dataset',
        type=str,
        help='Path to training dataset to load. If the path begins with `gs://`'
        ' then the dataset will be loaded from a Google Cloud Storage bucket.',
    )
    parser.add_argument(
        '--shuffle_buffer_size',
        type=int,
        default=2**18,
        help='Size of the shuffle buffer (in samples)',
    )
    parser.add_argument(
        '--eval_dataset',
        type=str,
        help='Path to evaluation dataset to load. If the path begins with `gs://`'
        ' then the dataset will be loaded from a Google Cloud Storage bucket.',
    )
    parser.add_argument(
        '--num_epochs',
        type=int,
        default=1,
        help='Number of epochs to train for.',
    )
    parser.add_argument(
        '--learning_rate',
        type=float,
        default=1e-4,
        help='Learning rate to use for training.',
    )
    parser.add_argument(
        '--weight_decay_rate',
        type=float,
        default=1e-3,
        help='Weight decay rate to use for training.',
    )
    parser.add_argument(
        '--max_length',
        type=int,
        default=512,
        help='Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py',
    )
    parser.add_argument(
        '--mlm_probability',
        type=float,
        default=0.15,
        help='Fraction of tokens to mask during training.',
    )
    parser.add_argument('--output_dir', type=str, required=True, help='Path to save model checkpoints to.')
    parser.add_argument('--hub_model_id', type=str, help='Model ID to upload to on the Hugging Face Hub.')

    args = parser.parse_args()
    return args
def _A ( args ):
    """Connect to and initialize a (cloud or local) TPU.

    Args:
        args: parsed CLI namespace; reads ``tpu_name``, ``tpu_zone`` and
            ``gcp_project`` (the mangled original ignored its parameter and
            read a global ``args`` instead — the parameter is now used).

    Returns:
        The resolved ``TPUClusterResolver``.

    Raises:
        RuntimeError: if no TPU could be resolved.
    """
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
        else:
            # No explicit name: rely on the environment (e.g. a TPU VM).
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            'Couldn\'t connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or '
            '--gcp_project. When running on a TPU VM, use --tpu_name local.' )
    tf.config.experimental_connect_to_cluster(tpu )
    tf.tpu.experimental.initialize_tpu_system(tpu )
    return tpu


# Restore the descriptive name used by call sites later in the file.
initialize_tpu = _A
def _A ( _lowerCAmelCase ):
"""simple docstring"""
__lowercase =0
for file in file_list:
__lowercase =file.split('/' )[-1]
__lowercase =re.search(r'-\d+-(\d+)\.tfrecord' , _lowerCAmelCase ).group(1 )
__lowercase =int(_lowerCAmelCase )
num_samples += sample_count
return num_samples
def _A ( records , decode_fn , mask_fn , batch_size , shuffle , shuffle_buffer_size=None ):
    """Build a tf.data pipeline over TFRecord shards.

    Parameter names are restored from the keyword call sites in ``main``;
    the mangled original declared six parameters all named ``_lowerCAmelCase``,
    which is a SyntaxError.

    Args:
        records: list of ``.tfrecord`` paths.
        decode_fn: per-record parse function.
        mask_fn: per-batch masking function (applied after batching).
        batch_size: global batch size; incomplete batches are dropped.
        shuffle: whether to shuffle shards and samples.
        shuffle_buffer_size: sample shuffle buffer; required when ``shuffle``.

    Returns:
        The prepared ``tf.data.Dataset``.
    """
    num_samples = count_samples(records )
    dataset = tf.data.Dataset.from_tensor_slices(records )
    if shuffle:
        # Shuffle the shard order first.
        dataset = dataset.shuffle(len(records ) )
    dataset = tf.data.TFRecordDataset(dataset , num_parallel_reads=tf.data.AUTOTUNE )
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples ) )
    dataset = dataset.map(decode_fn , num_parallel_calls=tf.data.AUTOTUNE )
    if shuffle:
        assert shuffle_buffer_size is not None
        # Use the parameter (the original read the global `args` instead).
        dataset = dataset.shuffle(shuffle_buffer_size )
    dataset = dataset.batch(batch_size , drop_remainder=True )
    dataset = dataset.map(mask_fn , num_parallel_calls=tf.data.AUTOTUNE )
    dataset = dataset.prefetch(tf.data.AUTOTUNE )
    return dataset


# Restore the descriptive name used by call sites later in the file.
prepare_dataset = _A
def _A ( args ):
    """Train a masked-language model from TFRecord shards on TPU or GPU.

    NOTE(review): the mangled original assigned every result to ``__lowercase``
    and then read descriptive names; the bindings below are reconstructed from
    the names the body reads. It relies on ``initialize_tpu``, ``count_samples``
    and ``prepare_dataset`` being bound at module level — in the mangled file
    those defs are all named ``_A``; confirm against the upstream example.

    Args:
        args: parsed CLI namespace from ``parse_args``.
    """
    if not args.no_tpu:
        tpu = initialize_tpu(args )
        strategy = tf.distribute.TPUStrategy(tpu )
    else:
        strategy = tf.distribute.OneDeviceStrategy(device='/gpu:0' )
    if args.bfloat16:  # was `args.bfloataa` (mangled) — argparse dest for --bfloat16
        tf.keras.mixed_precision.set_global_policy('mixed_bfloat16' )
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer )
    config = AutoConfig.from_pretrained(args.pretrained_model_config )
    # Make the model vocab match the tokenizer that produced the data.
    config.vocab_size = tokenizer.vocab_size
    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset , '*.tfrecord' ) )
    if not training_records:
        raise ValueError(f"""No .tfrecord files found in {args.train_dataset}.""" )
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset , '*.tfrecord' ) )
    if not eval_records:
        raise ValueError(f"""No .tfrecord files found in {args.eval_dataset}.""" )
    num_train_samples = count_samples(training_records )
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs
    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config )
        model(model.dummy_inputs )  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer , schedule = create_optimizer(
            num_train_steps=total_train_steps , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer , metrics=['accuracy'] )

    def decode_fn(example ):
        # Fixed-length int64 features, matching prepare_tfrecord_shards.py.
        features = {
            'input_ids': tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
            'attention_mask': tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
        }
        return tf.io.parse_single_example(example , features )

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer , mlm_probability=args.mlm_probability , mlm=True , return_tensors='tf' )

    def mask_with_collator(batch ):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch['attention_mask'] , tf.bool )
            | (batch['input_ids'] == tokenizer.cls_token_id)
            | (batch['input_ids'] == tokenizer.sep_token_id)
        )
        batch['input_ids'] , batch['labels'] = data_collator.tf_mask_tokens(
            batch['input_ids'] , vocab_size=len(tokenizer ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=special_tokens_mask , )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync
    train_dataset = prepare_dataset(
        training_records , decode_fn=decode_fn , mask_fn=mask_with_collator , batch_size=batch_size , shuffle=True , shuffle_buffer_size=args.shuffle_buffer_size , )
    eval_dataset = prepare_dataset(
        eval_records , decode_fn=decode_fn , mask_fn=mask_with_collator , batch_size=batch_size , shuffle=False , )
    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=tokenizer ) )
    model.fit(
        train_dataset , validation_data=eval_dataset , epochs=args.num_epochs , callbacks=callbacks , )
    model.save_pretrained(args.output_dir )


# Restore the descriptive name used by the __main__ guard below.
main = _A
if __name__ == "__main__":
    # NOTE(review): `parse_args` is defined earlier in the file under a mangled
    # name — confirm the binding. The original called `main(args)` where `args`
    # was never defined at module level; pass the parsed namespace instead.
    lowerCamelCase = parse_args()
    main(lowerCamelCase)
| 474 | 1 |
'''simple docstring'''
_lowerCamelCase : Dict = 8.3_144_598
def _lowerCAmelCase ( __a , __a ) -> float:
'''simple docstring'''
if temperature < 0:
raise Exception("""Temperature cannot be less than 0 K""" )
if molar_mass <= 0:
raise Exception("""Molar mass cannot be less than or equal to 0 kg/mol""" )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
_lowerCamelCase : int = 3_00
_lowerCamelCase : int = 28
_lowerCamelCase : Any = rms_speed_of_molecule(temperature, molar_mass)
print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s") | 512 | '''simple docstring'''
import numpy as np
import skfuzzy as fuzz

if __name__ == "__main__":
    # Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc). The mangled original assigned
    # both triangles to the same name, making the two sets identical.
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)
    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()
    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title("""Young""")
    plt.grid(True)
    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title("""Middle aged""")
    plt.grid(True)
    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title("""union""")
    plt.grid(True)
    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title("""intersection""")
    plt.grid(True)
    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title("""complement_a""")
    plt.grid(True)
    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title("""difference a/b""")
    plt.grid(True)
    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title("""alg_sum""")
    plt.grid(True)
    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title("""alg_product""")
    plt.grid(True)
    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title("""bdd_sum""")
    plt.grid(True)
    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title("""bdd_difference""")
    plt.grid(True)
    plt.subplots_adjust(hspace=0.5)
    plt.show()
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
_A : Any = logging.get_logger(__name__)
# The feature-extractor class below calls `logger.warning(...)`; the mangled
# assignment above only bound `_A`, so also bind the conventional name.
logger = _A
class __snake_case ( SequenceFeatureExtractor ):
    """Kaldi-compliant log-mel filterbank feature extractor for speech models.

    Extracts 80-dim (by default) fbank features from raw mono speech and
    optionally applies utterance-level cepstral mean/variance normalization
    (CMVN). Parameter and method names are restored from the body usage; the
    mangled original declared duplicate ``A_`` parameters (a SyntaxError),
    named three different methods ``lowercase_`` and inherited from the
    undefined ``__SCREAMING_SNAKE_CASE`` instead of the imported
    ``SequenceFeatureExtractor``.
    """

    # Tensor names produced by __call__; read by the base class during padding.
    model_input_names = ["""input_features""", """attention_mask"""]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=1_60_00,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(self, waveform):
        """Compute Kaldi fbank features for a single 1-D float waveform."""
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(x, input_length, normalize_means=True, normalize_vars=True, padding_value=0.0):
        """Per-utterance mean/variance normalization over the first `input_length` frames."""
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)
        if input_length < x.shape[0]:
            # Re-fill the padded region, which normalization just perturbed.
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(self, input_features, attention_mask=None):
        """Apply utterance_cmvn to every feature matrix in the batch."""
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]

    def __call__(
        self,
        raw_speech,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_tensors=None,
        sampling_rate=None,
        return_attention_mask=None,
        **kwargs,
    ):
        """Featurize raw speech: fbank extraction, padding, optional CMVN."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
                    f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
                    f''' {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''' )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({'''input_features''': features})
        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get('''input_features''' )
        if isinstance(input_features[0], list):
            padded_inputs['''input_features'''] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        attention_mask = padded_inputs.get('''attention_mask''' )
        if attention_mask is not None:
            padded_inputs['''attention_mask'''] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs['''input_features'''] = self.normalize(
                padded_inputs['''input_features'''], attention_mask=attention_mask )
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
        return padded_inputs
return padded_inputs
| 100 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
snake_case__ = logging.getLogger()
# `analyze_directory` below logs via `logger.info`; bind that name as well.
logger = snake_case__
@unittest.skip('Temporarily disable the doc tests.')
@require_torch
@require_tf
@slow
class lowerCAmelCase_ ( unittest.TestCase):
    """Run doctests over the library's modeling/tokenization/configuration files.

    Method and parameter names are restored from body usage: the mangled
    original declared duplicate ``__A`` parameters (a SyntaxError) and named
    every method ``_snake_case``, so only the last survived.
    """

    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        n_identifier: Union[List[str], None] = None,
        ignore_files: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ) -> Any:
        """Doctest every file in `directory` that matches the filters.

        Args:
            directory: directory to scan.
            identifier: keep only files whose name contains this substring.
            n_identifier: drop files containing this substring (or any of a list).
            ignore_files: extra file names to skip (``__init__.py`` is always skipped).
            only_modules: if True, doctest the importable module; otherwise
                doctest the file's text directly.
        """
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print("Testing", file)
            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(F'''{module_identifier} is not a module.''')
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_doctests(self) -> List[Any]:
        """Doctest modeling files (CTRL files are known-bad and skipped)."""
        directory = Path("src/transformers")
        identifier = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(directory, identifier=identifier, ignore_files=ignore_files)

    def test_tokenization_doctests(self) -> List[Any]:
        """Doctest tokenization files."""
        directory = Path("src/transformers")
        identifier = "tokenization"
        self.analyze_directory(directory, identifier=identifier)

    def test_configuration_doctests(self) -> Union[str, Any]:
        """Doctest configuration files."""
        directory = Path("src/transformers")
        identifier = "configuration"
        self.analyze_directory(directory, identifier=identifier)

    def test_remaining_doctests(self) -> int:
        """Doctest everything NOT covered by the three suites above."""
        directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(directory, n_identifier=n_identifiers)

    def test_doc_files(self) -> Optional[int]:
        """Doctest the documentation sources as plain text files."""
        directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        # only_modules=False: docs files are not importable modules.
        self.analyze_directory(directory, ignore_files=ignore_files, only_modules=False)
| 395 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
UpperCamelCase : Dict = logging.get_logger(__name__)
UpperCamelCase : Union[str, Any] = {
"""deepmind/language-perceiver""": """https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json""",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class A__ ( PretrainedConfig ):
    """Configuration for Perceiver models.

    Parameter names are restored from the assignment list in the body; the
    mangled original declared every parameter as ``lowerCamelCase__`` (a
    SyntaxError) and inherited from ``A__`` — undefined at class-creation
    time — instead of the imported ``PretrainedConfig``.
    """

    # PretrainedConfig's registry key (was the mangled `_lowercase`).
    model_type = 'perceiver'

    def __init__(
        self,
        num_latents=256,
        d_latents=1_280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2_048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1_920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class A__ ( OnnxConfig ):
    """ONNX export configuration for Perceiver models.

    Method names are restored to the ``OnnxConfig`` override points; the
    mangled original named all three methods ``_UpperCamelCase`` (so only the
    last survived) and inherited from the config class above instead of the
    imported ``OnnxConfig``.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis layout of the exported graph's inputs."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating exported outputs."""
        return 1E-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        """Build dummy text or image inputs for tracing, keyed as `inputs`."""
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair )
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"] ) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework ) )
            # Perceiver expects the generic key `inputs` rather than `input_ids`.
            inputs["inputs"] = inputs.pop("input_ids" )
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin ) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch )
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width )
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework ) )
            inputs["inputs"] = inputs.pop("pixel_values" )
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
| 715 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
UpperCamelCase : Dict = importlib.util.find_spec("""s3fs""") is not None
# The guard below reads `_has_safs`, which the mangled assignment never bound.
_has_safs = UpperCamelCase
if _has_safs:
    from .safilesystem import SaFileSystem  # noqa: F401
UpperCamelCase : List[compression.BaseCompressedFileFileSystem] = [
    compression.BzaFileSystem,
    compression.GzipFileSystem,
    compression.LzaFileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# The registration loop below iterates `COMPRESSION_FILESYSTEMS`; bind it too.
COMPRESSION_FILESYSTEMS = UpperCamelCase
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def UpperCamelCase_ ( dataset_path: str ) -> str:
    """Strip a leading protocol prefix (e.g. ``s3://``) from a dataset path.

    The mangled original named its parameter ``__a`` while the body read the
    undefined ``dataset_path``; the parameter name is restored.
    """
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://" )[1]
    return dataset_path


# Descriptive alias matching the helper's purpose.
extract_path_from_uri = UpperCamelCase_
def UpperCamelCase_ ( fs ) -> bool:
    """Return True when `fs` is a non-local fsspec filesystem.

    The mangled original named its parameter ``__a`` while the body read the
    undefined ``fs``; the parameter name is restored. ``None`` counts as local.
    """
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


# Descriptive alias used by the move helper below.
is_remote_filesystem = UpperCamelCase_
def UpperCamelCase_ ( fs , src , dst ) -> None:
    """Move `src` to `dst` on the given fsspec filesystem.

    Parameter names are restored from the body; the mangled original declared
    three parameters all named ``__a`` (a SyntaxError). The ``Optional[Any]``
    return annotation referenced names not imported here, so it is dropped.
    """
    is_local = not is_remote_filesystem(fs )
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src ) , fs._strip_protocol(dst ) )
    else:
        # NOTE(review): the mangled call was `recursive=__a`; upstream passes
        # True here — confirm against the datasets source.
        fs.mv(src , dst , recursive=True )
def UpperCamelCase_ ( ) -> None:
    """Reset fsspec's asyncio state so it is safe to use after a fork.

    The fallback branch restores the three module-level objects; the mangled
    original had lost the assignment targets — reconstructed from the fsspec
    internals (``iothread``/``loop``/``lock``). TODO confirm against the
    installed fsspec version.
    """
    if hasattr(fsspec.asyn , "reset_lock" ):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
| 151 | 0 |
'''simple docstring'''
_snake_case = {
'km/h': 1.0,
'm/s': 3.6,
'mph': 1.6_0_9_3_4_4,
'knot': 1.8_5_2,
}
_snake_case = {
'km/h': 1.0,
'm/s': 0.2_7_7_7_7_7_7_7_8,
'mph': 0.6_2_1_3_7_1_1_9_2,
'knot': 0.5_3_9_9_5_6_8_0_3,
}
def _A ( snake_case , snake_case , snake_case ) -> Any:
if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
_lowercase : List[str] = (
F'''Incorrect \'from_type\' or \'to_type\' value: {unit_from!r}, {unit_to!r}\n'''
F'''Valid values are: {', '.join(SCREAMING_SNAKE_CASE__ )}'''
)
raise ValueError(SCREAMING_SNAKE_CASE__ )
return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 245 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()


class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ):
    """Fast (tiny-model) tests for the Kandinsky 2.2 controlnet pipeline.

    NOTE(review): this class is machine-mangled — every class attribute is
    named ``lowerCAmelCase_`` and every method ``SCREAMING_SNAKE_CASE__``, so
    later definitions shadow earlier ones, and several bodies assign to
    ``UpperCAmelCase__`` while reading descriptive names (``model``,
    ``generator`` …) that are therefore undefined. Left byte-identical here;
    restoring it requires the upstream diffusers test file.
    """
    # Intended (upstream) meanings, in order: pipeline_class, params,
    # batch_params, required_optional_params, test_xformers_attention.
    lowerCAmelCase_ : Any = KandinskyVaaControlnetPipeline
    lowerCAmelCase_ : int = ["""image_embeds""", """negative_image_embeds""", """hint"""]
    lowerCAmelCase_ : List[str] = ["""image_embeds""", """negative_image_embeds""", """hint"""]
    lowerCAmelCase_ : Union[str, Any] = [
        """generator""",
        """height""",
        """width""",
        """latents""",
        """guidance_scale""",
        """num_inference_steps""",
        """return_dict""",
        """guidance_scale""",
        """num_images_per_prompt""",
        """output_type""",
        """return_dict""",
    ]
    lowerCAmelCase_ : List[Any] = False
    @property
    def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
        """Latent spatial size of the dummy UNet."""
        return 32
    @property
    def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
        """Time-embedding input dimension of the dummy UNet."""
        return 32
    @property
    def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
        """Cross-attention dimension (mirrors the time input dim)."""
        return self.time_input_dim
    @property
    def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
        """Text-embedder hidden size (4x the time input dim)."""
        return self.time_input_dim * 4
    @property
    def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
        """Number of sequence positions used by dummy embeddings."""
        return 1_00
    @property
    def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
        """Build a tiny UNet2DConditionModel for fast CPU tests."""
        torch.manual_seed(0 )
        UpperCAmelCase__ = {
            """in_channels""": 8,
            # Out channels is double in channels because predicts mean and variance
            """out_channels""": 8,
            """addition_embed_type""": """image_hint""",
            """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
            """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
            """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
            """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
            """layers_per_block""": 1,
            """encoder_hid_dim""": self.text_embedder_hidden_size,
            """encoder_hid_dim_type""": """image_proj""",
            """cross_attention_dim""": self.cross_attention_dim,
            """attention_head_dim""": 4,
            """resnet_time_scale_shift""": """scale_shift""",
            """class_embed_type""": None,
        }
        # NOTE(review): assigns to UpperCAmelCase__ but returns the undefined
        # `model`, and passes the undefined `_UpperCAmelCase` — mangling artifact.
        UpperCAmelCase__ = UNetaDConditionModel(**_UpperCAmelCase )
        return model
    @property
    def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
        """Constructor kwargs for the tiny VQ decoder (movq)."""
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }
    @property
    def SCREAMING_SNAKE_CASE__ ( self : Any ):
        """Build the tiny VQModel decoder."""
        torch.manual_seed(0 )
        UpperCAmelCase__ = VQModel(**self.dummy_movq_kwargs )
        return model
    def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
        """Assemble the pipeline components (unet, scheduler, movq)."""
        UpperCAmelCase__ = self.dummy_unet
        UpperCAmelCase__ = self.dummy_movq
        UpperCAmelCase__ = DDIMScheduler(
            num_train_timesteps=10_00 , beta_schedule="""linear""" , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=_UpperCAmelCase , set_alpha_to_one=_UpperCAmelCase , steps_offset=1 , prediction_type="""epsilon""" , thresholding=_UpperCAmelCase , )
        UpperCAmelCase__ = {
            """unet""": unet,
            """scheduler""": scheduler,
            """movq""": movq,
        }
        return components
    def SCREAMING_SNAKE_CASE__ ( self : List[str] , _UpperCAmelCase : Any , _UpperCAmelCase : Dict=0 ):
        """Build deterministic dummy pipeline inputs for the given device/seed."""
        UpperCAmelCase__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
        UpperCAmelCase__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            _UpperCAmelCase )
        # create hint
        UpperCAmelCase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
        if str(_UpperCAmelCase ).startswith("""mps""" ):
            UpperCAmelCase__ = torch.manual_seed(_UpperCAmelCase )
        else:
            UpperCAmelCase__ = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
        UpperCAmelCase__ = {
            """image_embeds""": image_embeds,
            """negative_image_embeds""": negative_image_embeds,
            """hint""": hint,
            """generator""": generator,
            """height""": 64,
            """width""": 64,
            """guidance_scale""": 4.0,
            """num_inference_steps""": 2,
            """output_type""": """np""",
        }
        return inputs
    def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
        """End-to-end CPU test: run the pipeline and compare a pixel slice."""
        UpperCAmelCase__ = """cpu"""
        UpperCAmelCase__ = self.get_dummy_components()
        UpperCAmelCase__ = self.pipeline_class(**_UpperCAmelCase )
        UpperCAmelCase__ = pipe.to(_UpperCAmelCase )
        pipe.set_progress_bar_config(disable=_UpperCAmelCase )
        UpperCAmelCase__ = pipe(**self.get_dummy_inputs(_UpperCAmelCase ) )
        UpperCAmelCase__ = output.images
        UpperCAmelCase__ = pipe(
            **self.get_dummy_inputs(_UpperCAmelCase ) , return_dict=_UpperCAmelCase , )[0]
        UpperCAmelCase__ = image[0, -3:, -3:, -1]
        UpperCAmelCase__ = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        UpperCAmelCase__ = np.array(
            [0.695_9826, 0.86_8279, 0.755_8092, 0.6876_9467, 0.8580_5804, 0.6597_7496, 0.4488_5302, 0.595_9111, 0.425_1595] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
        ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
    """Slow GPU integration test for the Kandinsky 2.2 controlnet pipeline.

    NOTE(review): machine-mangled — both methods are named
    ``SCREAMING_SNAKE_CASE__`` (the first looks like a ``tearDown``) and
    results are bound to ``UpperCAmelCase__`` while later lines read
    descriptive names (``hint``, ``pipe_prior`` …). Left byte-identical.
    """
    def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
        """Free GPU memory between tests (intended as tearDown)."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def SCREAMING_SNAKE_CASE__ ( self : str ):
        """Run prior + controlnet pipelines on GPU and compare to a reference image."""
        UpperCAmelCase__ = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" )
        UpperCAmelCase__ = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinskyv22/hint_image_cat.png""" )
        # Convert the hint image to a (1, 3, H, W) float tensor in [0, 1].
        UpperCAmelCase__ = torch.from_numpy(np.array(_UpperCAmelCase ) ).float() / 255.0
        UpperCAmelCase__ = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
        UpperCAmelCase__ = KandinskyVaaPriorPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
        pipe_prior.to(_UpperCAmelCase )
        UpperCAmelCase__ = KandinskyVaaControlnetPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.floataa )
        UpperCAmelCase__ = pipeline.to(_UpperCAmelCase )
        pipeline.set_progress_bar_config(disable=_UpperCAmelCase )
        UpperCAmelCase__ = """A robot, 4k photo"""
        UpperCAmelCase__ = torch.Generator(device="""cuda""" ).manual_seed(0 )
        UpperCAmelCase__ , UpperCAmelCase__ = pipe_prior(
            _UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
        UpperCAmelCase__ = torch.Generator(device="""cuda""" ).manual_seed(0 )
        UpperCAmelCase__ = pipeline(
            image_embeds=_UpperCAmelCase , negative_image_embeds=_UpperCAmelCase , hint=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=1_00 , output_type="""np""" , )
        UpperCAmelCase__ = output.images[0]
        assert image.shape == (5_12, 5_12, 3)
        assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
| 603 | 0 |
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class a ( unittest.TestCase ,ToolTesterMixin ):
    """Tests for the ``text-to-speech`` tool.

    The mangled original inherited from the undefined ``_UpperCamelCase``
    (the import above brings in ``ToolTesterMixin``) and named all three
    methods ``__snake_case``, so unittest discovered no tests at all.
    """

    def setUp(self : int ):
        self.tool = load_tool('''text-to-speech''' )
        self.tool.setup()

    def test_exact_match_arg(self : Optional[int] ):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0 )
        result = self.tool('''hey''' )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3], torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485] ), ) )

    def test_exact_match_kwarg(self : Optional[Any] ):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0 )
        result = self.tool('''hey''' )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3], torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485] ), ) )
| 713 |
'''simple docstring'''
from __future__ import annotations
import math
def A ( A_ : int ):
    """Sieve of Eratosthenes: return all primes <= ``A_`` in ascending order.

    Args:
        A_: upper bound (inclusive); must be a positive integer.

    Returns:
        list[int]: primes in increasing order (empty for ``A_ == 1``).

    Raises:
        ValueError: if ``A_`` is zero or negative.
    """
    # The obfuscated original collapsed the distinct locals (sieve, prime,
    # start, end) into one name and mixed an undefined ``num`` with the
    # parameter; the canonical algorithm is restored below.
    if A_ <= 0:
        msg = f"""{A_}: Invalid input, please enter a positive integer."""
        raise ValueError(A_)
    sieve = [True] * (A_ + 1)
    prime = []
    start = 2
    end = int(math.sqrt(A_))
    while start <= end:
        # If start is still marked, it is prime: record it and cross out
        # its multiples, beginning at start*start (smaller multiples were
        # already crossed out by smaller primes).
        if sieve[start]:
            prime.append(start)
            for i in range(start * start, A_ + 1, start):
                sieve[i] = False
        start += 1
    # Everything above sqrt(A_) that survived the sieve is prime.
    for j in range(end + 1, A_ + 1):
        if sieve[j]:
            prime.append(j)
    return prime


if __name__ == "__main__":
    # The original guard called an undefined name; invoke the sieve directly.
    print(A(int(input("Enter a positive integer: ").strip())))
| 555 | 0 |
'''simple docstring'''
from __future__ import annotations
from cmath import sqrt
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> tuple[complex, complex]:
"""simple docstring"""
if a == 0:
raise ValueError("""Coefficient 'a' must not be zero.""" )
__snake_case : str = b * b - 4 * a * c
__snake_case : Optional[Any] = (-b + sqrt(_lowerCamelCase )) / (2 * a)
__snake_case : Dict = (-b - sqrt(_lowerCamelCase )) / (2 * a)
return (
root_a.real if not root_a.imag else root_a,
root_a.real if not root_a.imag else root_a,
)
def _a ( ) -> Tuple:
"""simple docstring"""
__snake_case , __snake_case : Dict = quadratic_roots(a=5 , b=6 , c=1 )
print(F'''The solutions are: {solutiona} and {solutiona}''' )
if __name__ == "__main__":
main()
| 26 |
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class __lowerCAmelCase ( AbstractDatasetInputStream ):
    """Dataset input stream backed by a user-supplied generator callable.

    Wires the callable into the packaged ``Generator`` builder; ``lowerCamelCase``
    materializes the "train" split either as a streaming dataset or as a fully
    prepared map-style dataset.
    """

    def __init__(
        self,
        generator,
        features = None,
        cache_dir = None,
        keep_in_memory = False,
        streaming = False,
        gen_kwargs = None,
        num_proc = None,
        **kwargs,
    ):
        # NOTE(review): the obfuscated original repeated one name for every
        # parameter (a SyntaxError) and used an undefined base class; the
        # parameter names below follow the keyword arguments forwarded just
        # beneath and the imported ``AbstractDatasetInputStream`` base —
        # confirm against callers.
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        # The builder must live on ``self`` — the read method below uses it.
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def lowerCamelCase ( self ):
        """Build and return the dataset (streaming or fully prepared)."""
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split='''train''' )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split='''train''' , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
| 175 | 0 |
"""simple docstring"""
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase__ : int = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class snake_case ( __UpperCAmelCase , unittest.TestCase ):
    """Tokenizer test-suite for DeBERTa-v2 (slow and fast SentencePiece tokenizers).

    NOTE(review): an automated rename damaged this class — the mixin base is the
    undefined name ``__UpperCAmelCase`` (presumably the imported
    TokenizerTesterMixin), all four class attributes share one name so only the
    last assignment survives, every method is named ``__lowerCAmelCase`` so only
    the last definition survives under unittest discovery, and several boolean
    literal arguments (``do_lower_case=...``, ``split_by_punct=...``,
    ``add_special_tokens=...``) were replaced by whichever local was in scope.
    Treat the bodies below as a record of the original test logic, not as
    runnable code, and confirm each against the upstream transformers test.
    """

    snake_case__ = DebertaVaTokenizer
    snake_case__ = DebertaVaTokenizerFast
    snake_case__ = True
    snake_case__ = True

    def __lowerCAmelCase ( self : Tuple ):
        """Build a slow tokenizer from the SentencePiece fixture and save it."""
        super().setUp()

        # We have a SentencePiece fixture for testing
        UpperCAmelCase__ = DebertaVaTokenizer(lowerCamelCase__ ,unk_token='<unk>' )
        tokenizer.save_pretrained(self.tmpdirname )

    def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : Tuple ):
        """Return an (input, expected output) text pair for round-trip checks."""
        UpperCAmelCase__ = 'this is a test'
        UpperCAmelCase__ = 'this is a test'
        return input_text, output_text

    def __lowerCAmelCase ( self : Optional[Any] ):
        """Token <-> id conversion: '<pad>' maps to id 0 and back."""
        UpperCAmelCase__ = '<pad>'
        UpperCAmelCase__ = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) ,lowerCamelCase__ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) ,lowerCamelCase__ )

    def __lowerCAmelCase ( self : Optional[int] ):
        """Vocabulary layout: first keys and total size (30_001 entries)."""
        UpperCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] ,'<pad>' )
        self.assertEqual(vocab_keys[1] ,'<unk>' )
        self.assertEqual(vocab_keys[-1] ,'[PAD]' )
        self.assertEqual(len(lowerCamelCase__ ) ,30_001 )

    def __lowerCAmelCase ( self : List[Any] ):
        """Reported vocab_size (without added tokens) is 30_000."""
        self.assertEqual(self.get_tokenizer().vocab_size ,30_000 )

    def __lowerCAmelCase ( self : Dict ):
        """Lower-casing: slow and fast tokenizers agree on a mixed-case input."""
        # fmt: off
        UpperCAmelCase__ = ' \tHeLLo!how  \n Are yoU?  '
        UpperCAmelCase__ = ['▁hello', '!', 'how', '▁are', '▁you', '?']
        # fmt: on
        UpperCAmelCase__ = DebertaVaTokenizer(lowerCamelCase__ ,do_lower_case=lowerCamelCase__ )
        UpperCAmelCase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ) )
        self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )

        UpperCAmelCase__ = DebertaVaTokenizerFast(lowerCamelCase__ ,do_lower_case=lowerCamelCase__ )
        UpperCAmelCase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ) )
        self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )

    @unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.' )
    def __lowerCAmelCase ( self : List[str] ):
        """Skipped: slow/fast tokenizers disagree (known fast-tokenizer bug)."""
        pass

    @unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.' )
    def __lowerCAmelCase ( self : int ):
        """Skipped: slow/fast tokenizers disagree (known fast-tokenizer bug)."""
        pass

    def __lowerCAmelCase ( self : Optional[Any] ):
        """split_by_punct: accented input tokenizes with punctuation kept separate."""
        # fmt: off
        UpperCAmelCase__ = 'I was born in 92000, and this is falsé.'
        UpperCAmelCase__ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
        # fmt: on
        UpperCAmelCase__ = DebertaVaTokenizer(lowerCamelCase__ ,split_by_punct=lowerCamelCase__ )
        UpperCAmelCase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ) )
        self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )

        UpperCAmelCase__ = DebertaVaTokenizerFast(lowerCamelCase__ ,split_by_punct=lowerCamelCase__ )
        UpperCAmelCase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ) )
        self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )

    def __lowerCAmelCase ( self : Tuple ):
        """do_lower_case + split_by_punct combination."""
        # fmt: off
        UpperCAmelCase__ = 'I was born in 92000, and this is falsé.'
        UpperCAmelCase__ = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
        # fmt: on
        UpperCAmelCase__ = DebertaVaTokenizer(lowerCamelCase__ ,do_lower_case=lowerCamelCase__ ,split_by_punct=lowerCamelCase__ )
        UpperCAmelCase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ) )
        self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )

        UpperCAmelCase__ = DebertaVaTokenizerFast(lowerCamelCase__ ,do_lower_case=lowerCamelCase__ ,split_by_punct=lowerCamelCase__ )
        UpperCAmelCase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ) )
        self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )

    def __lowerCAmelCase ( self : Tuple ):
        """do_lower_case with split_by_punct disabled."""
        # fmt: off
        UpperCAmelCase__ = 'I was born in 92000, and this is falsé.'
        UpperCAmelCase__ = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
        # fmt: on
        UpperCAmelCase__ = DebertaVaTokenizer(lowerCamelCase__ ,do_lower_case=lowerCamelCase__ ,split_by_punct=lowerCamelCase__ )
        UpperCAmelCase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ) )
        self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )

        UpperCAmelCase__ = DebertaVaTokenizerFast(lowerCamelCase__ ,do_lower_case=lowerCamelCase__ ,split_by_punct=lowerCamelCase__ )
        UpperCAmelCase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ) )
        self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )

    def __lowerCAmelCase ( self : Tuple ):
        """Case preserved, split_by_punct enabled."""
        # fmt: off
        UpperCAmelCase__ = 'I was born in 92000, and this is falsé.'
        UpperCAmelCase__ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
        # fmt: on
        UpperCAmelCase__ = DebertaVaTokenizer(lowerCamelCase__ ,do_lower_case=lowerCamelCase__ ,split_by_punct=lowerCamelCase__ )
        UpperCAmelCase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ) )
        self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )

        UpperCAmelCase__ = DebertaVaTokenizerFast(lowerCamelCase__ ,do_lower_case=lowerCamelCase__ ,split_by_punct=lowerCamelCase__ )
        UpperCAmelCase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ) )
        self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )

    def __lowerCAmelCase ( self : int ):
        """Case preserved, split_by_punct disabled, whitespace-heavy input."""
        # fmt: off
        UpperCAmelCase__ = ' \tHeLLo!how  \n Are yoU?  '
        UpperCAmelCase__ = ['▁', '<unk>', 'e', '<unk>', 'o', '!', 'how', '▁', '<unk>', 're', '▁yo', '<unk>', '?']
        # fmt: on
        UpperCAmelCase__ = DebertaVaTokenizer(lowerCamelCase__ ,do_lower_case=lowerCamelCase__ ,split_by_punct=lowerCamelCase__ )
        UpperCAmelCase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ) )
        self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )

        UpperCAmelCase__ = DebertaVaTokenizerFast(lowerCamelCase__ ,do_lower_case=lowerCamelCase__ ,split_by_punct=lowerCamelCase__ )
        UpperCAmelCase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ) )
        self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )

    def __lowerCAmelCase ( self : Tuple ):
        """Slow and fast tokenizers agree on tokens and on encoded ids."""
        UpperCAmelCase__ = self.get_tokenizer()
        UpperCAmelCase__ = self.get_rust_tokenizer()

        UpperCAmelCase__ = 'I was born in 92000, and this is falsé.'

        UpperCAmelCase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ) )
        UpperCAmelCase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ) )
        self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )

        UpperCAmelCase__ = tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ )
        UpperCAmelCase__ = rust_tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ )
        self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )

        UpperCAmelCase__ = self.get_rust_tokenizer()
        UpperCAmelCase__ = tokenizer.encode(lowerCamelCase__ )
        UpperCAmelCase__ = rust_tokenizer.encode(lowerCamelCase__ )
        self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )

    def __lowerCAmelCase ( self : Union[str, Any] ):
        """Full tokenizer check: ids, tokens, and back-conversion for both tokenizers."""
        UpperCAmelCase__ = 'This is a test'
        UpperCAmelCase__ = [13, 1, 4_398, 25, 21, 1_289]
        UpperCAmelCase__ = ['▁', 'T', 'his', '▁is', '▁a', '▁test']
        UpperCAmelCase__ = ['▁', '<unk>', 'his', '▁is', '▁a', '▁test']

        UpperCAmelCase__ = DebertaVaTokenizer(lowerCamelCase__ ,keep_accents=lowerCamelCase__ )
        UpperCAmelCase__ = DebertaVaTokenizerFast(lowerCamelCase__ ,keep_accents=lowerCamelCase__ )

        UpperCAmelCase__ = tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ )
        self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
        UpperCAmelCase__ = tokenizer.tokenize(lowerCamelCase__ )
        self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
        UpperCAmelCase__ = tokenizer.convert_ids_to_tokens(lowerCamelCase__ )
        self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )

        UpperCAmelCase__ = rust_tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ )
        self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
        UpperCAmelCase__ = rust_tokenizer.tokenize(lowerCamelCase__ )
        self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
        UpperCAmelCase__ = rust_tokenizer.convert_ids_to_tokens(lowerCamelCase__ )
        self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )

        # fmt: off
        UpperCAmelCase__ = 'I was born in 92000, and this is falsé.'
        UpperCAmelCase__ = [13, 1, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9]
        UpperCAmelCase__ = ['▁', 'I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.', ]
        UpperCAmelCase__ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
        # fmt: on

        UpperCAmelCase__ = tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ )
        self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
        UpperCAmelCase__ = tokenizer.tokenize(lowerCamelCase__ )
        self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
        UpperCAmelCase__ = tokenizer.convert_ids_to_tokens(lowerCamelCase__ )
        self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )

        UpperCAmelCase__ = rust_tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ )
        self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
        UpperCAmelCase__ = rust_tokenizer.tokenize(lowerCamelCase__ )
        self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
        UpperCAmelCase__ = rust_tokenizer.convert_ids_to_tokens(lowerCamelCase__ )
        self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )

    def __lowerCAmelCase ( self : Optional[Any] ):
        """build_inputs_with_special_tokens wraps one or two sequences in CLS/SEP."""
        UpperCAmelCase__ = DebertaVaTokenizer(lowerCamelCase__ )

        UpperCAmelCase__ = tokenizer.encode('sequence builders' )
        UpperCAmelCase__ = tokenizer.encode('multi-sequence build' )

        UpperCAmelCase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ )
        UpperCAmelCase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ ,lowerCamelCase__ )

        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] ,lowerCamelCase__ )
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] ,lowerCamelCase__ ,)

    @slow
    def __lowerCAmelCase ( self : List[str] ):
        """Integration check against a pinned microsoft/deberta-v2-xlarge revision."""
        # fmt: off
        UpperCAmelCase__ = {'input_ids': [[1, 39_867, 36, 19_390, 486, 27, 35_052, 81_436, 18, 60_685, 1_225, 7, 35_052, 81_436, 18, 9_367, 16_899, 18, 15_937, 53, 594, 773, 18, 16_287, 30_465, 36, 15_937, 6, 41_139, 38, 36_979, 60_763, 191, 6, 34_132, 99, 6, 50_538, 390, 43_230, 6, 34_132, 2_779, 20_850, 14, 699, 1_072, 1_194, 36, 382, 10_901, 53, 7, 699, 1_072, 2_084, 36, 20_422, 630, 53, 19, 105, 3_049, 1_896, 1_053, 16_899, 1_506, 11, 37_978, 4_243, 7, 1_237, 31_869, 200, 16_566, 654, 6, 35_052, 81_436, 7, 55_630, 13_593, 4, 2], [1, 26, 15_011, 13, 667, 8, 1_053, 18, 23_611, 1_237, 72_356, 12_820, 34, 104_134, 1_209, 35, 13_313, 6_627, 21, 202, 347, 7, 164, 2_399, 11, 46, 4_485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_232, 2_864, 15_785, 14_951, 105, 5, 8_581, 1_250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCamelCase__ ,model_name='microsoft/deberta-v2-xlarge' ,revision='ad6e42c1532ddf3a15c39246b63f5559d558b670' ,)
| 712 | """simple docstring"""
import re
def a_ ( lowerCamelCase ):
    """Split the input on any character that is not alphanumeric or whitespace,
    then split each punctuation-free fragment into words.

    Returns:
        list[list[str]]: one word-list per fragment.
    """
    # The original body referenced the undefined name ``str_`` (left over from
    # a rename); it must operate on the parameter instead.
    return [char.split() for char in re.split(r'[^ a-z A-Z 0-9 \s]' , lowerCamelCase )]
def a_ ( lowerCamelCase ):
    """Convert the input to PascalCase, e.g. "hello world" -> "HelloWorld"."""
    # The split helper's name was lost in a rename (and the call passed an
    # undefined ``str_``), so the word-grouping logic is inlined here.
    string_split = [char.split() for char in re.split(r'[^ a-z A-Z 0-9 \s]' , lowerCamelCase )]
    return "".join(
        [''.join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def a_ ( text , upper , separator ):
    """Join the words of *text* with *separator*, fully upper- or lower-cased.

    Args:
        text: input string to convert.
        upper: if truthy, upper-case every word; otherwise lower-case.
        separator: string placed between words of each fragment.

    Returns:
        str: the converted string, or "not valid string" on failure.
    """
    # The original definition repeated one name for all three parameters (a
    # SyntaxError) and called a split helper whose name was lost in a rename;
    # distinct names are restored and the split is inlined.
    try:
        string_split = [char.split() for char in re.split(r'[^ a-z A-Z 0-9 \s]' , text )]
        if upper:
            res_str = ''.join(
                [
                    separator.join([char.upper() for char in sub_str] )
                    for sub_str in string_split
                ] )
        else:
            res_str = ''.join(
                [
                    separator.join([char.lower() for char in sub_str] )
                    for sub_str in string_split
                ] )
        return res_str
    except IndexError:
        return "not valid string"
def a_ ( lowerCamelCase ):
    """PascalCase conversion, e.g. "hello world" -> "HelloWorld".

    The original delegated to a helper whose name was lost in a rename, so
    the equivalent logic is inlined here.
    """
    string_split = [char.split() for char in re.split(r'[^ a-z A-Z 0-9 \s]' , lowerCamelCase )]
    return "".join(
        [''.join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def a_ ( lowerCamelCase ):
    """camelCase conversion: PascalCase with the first letter lowered.

    Returns "not valid string" when the input yields no characters.
    """
    # The PascalCase helper's name was lost in a rename, so it is inlined.
    try:
        string_split = [char.split() for char in re.split(r'[^ a-z A-Z 0-9 \s]' , lowerCamelCase )]
        res_str = "".join(
            [''.join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
        # Empty input makes res_str[0] raise IndexError, handled below.
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"
def a_ ( text , upper ):
    """snake_case: words joined by underscores, upper- or lower-cased.

    The original delegated to a separator-joining helper whose name was lost
    in a rename (and its parameters shared one name — a SyntaxError), so the
    logic is inlined here with the separator fixed to '_'.
    """
    try:
        word_groups = [char.split() for char in re.split(r'[^ a-z A-Z 0-9 \s]' , text )]
        if upper:
            return ''.join(['_'.join([char.upper() for char in group] ) for group in word_groups] )
        return ''.join(['_'.join([char.lower() for char in group] ) for group in word_groups] )
    except IndexError:
        return "not valid string"
def a_ ( text , upper ):
    """kebab-case: words joined by hyphens, upper- or lower-cased.

    The original delegated to a separator-joining helper whose name was lost
    in a rename (and its parameters shared one name — a SyntaxError), so the
    logic is inlined here with the separator fixed to '-'.
    """
    try:
        word_groups = [char.split() for char in re.split(r'[^ a-z A-Z 0-9 \s]' , text )]
        if upper:
            return ''.join(['-'.join([char.upper() for char in group] ) for group in word_groups] )
        return ''.join(['-'.join([char.lower() for char in group] ) for group in word_groups] )
    except IndexError:
        return "not valid string"
if __name__ == "__main__":
    # Run this module's doctests when it is executed as a script.
    __import__('doctest').testmod()
| 632 | 0 |
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class UpperCamelCase_ :
    """Builds tiny EfficientFormer configs and random inputs for the TF tests.

    NOTE(review): the obfuscated original repeated one name for every
    ``__init__`` parameter (a SyntaxError) and gave all five methods one
    shared name, while the sibling test class calls the real method names
    (``prepare_config_and_inputs`` etc.). The parameter and method names
    below are reconstructed from the attribute-assignment order and those
    call sites — confirm against the upstream transformers tester.
    """

    def __init__(
        self,
        parent,
        batch_size: int = 13,
        image_size: int = 64,
        patch_size: int = 2,
        embed_dim: int = 3,
        num_channels: int = 3,
        is_training: bool = True,
        use_labels: bool = True,
        hidden_size: int = 128,
        hidden_sizes=[16, 32, 64, 128],
        num_hidden_layers: int = 7,
        num_attention_heads: int = 4,
        intermediate_size: int = 37,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        type_sequence_label_size: int = 10,
        initializer_range: float = 0.02,
        encoder_stride: int = 2,
        num_attention_outputs: int = 1,
        dim: int = 128,
        depths: List[int] = [2, 2, 2, 2],
        resolution: int = 2,
        mlp_expansion_ratio: int = 2,
    ):
        # The list defaults are only read, never mutated, so the shared
        # mutable-default objects are harmless here.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        self.num_attention_outputs = num_attention_outputs
        self.embed_dim = embed_dim
        # Sequence length the model reports: one more than embed_dim.
        self.seq_length = embed_dim + 1
        self.resolution = resolution
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.dim = dim
        self.mlp_expansion_ratio = mlp_expansion_ratio

    def prepare_config_and_inputs(self):
        """Random pixel values (+ labels when ``use_labels``) and a config."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Assemble an EfficientFormerConfig from the tester's hyper-parameters."""
        return EfficientFormerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            resolution=self.resolution,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            dim=self.dim,
            mlp_expansion_ratio=self.mlp_expansion_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Base model forward pass: last hidden state has the expected shape."""
        model = TFEfficientFormerModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Classification head: logits shape for RGB and greyscale inputs."""
        config.num_labels = self.type_sequence_label_size
        model = TFEfficientFormerForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFEfficientFormerForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        """Config plus the kwargs dict the common test-suite feeds to models."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
    """Common-suite model tests for the TF EfficientFormer models.

    NOTE(review): an automated rename damaged this class — the two mixin bases
    were collapsed to one undefined name (Python also rejects duplicate bases;
    they were presumably the imported TFModelTesterMixin and
    PipelineTesterMixin), the five class attributes below all share one name so
    only the final ``False`` survives, and every method is named
    ``SCREAMING_SNAKE_CASE`` so only the last definition survives under
    unittest discovery. ``setUp`` also references a ``TFEfficientFormerModelTester``
    that is not defined under that name in this file, and several literal
    arguments (``training=...``, ``config_class=...``) were replaced by the
    collapsed names. Treat the bodies as a record of the original test logic.
    """

    UpperCAmelCase__ = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    UpperCAmelCase__ = (
        {
            '''feature-extraction''': TFEfficientFormerModel,
            '''image-classification''': (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )

    UpperCAmelCase__ = False
    UpperCAmelCase__ = False
    UpperCAmelCase__ = False
    UpperCAmelCase__ = False
    UpperCAmelCase__ = False

    def SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]:
        """Wire up the model tester and a ConfigTester for the common suite."""
        A__ = TFEfficientFormerModelTester(self)
        A__ = ConfigTester(
            self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37)

    def SCREAMING_SNAKE_CASE ( self : List[str]) ->int:
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''EfficientFormer does not use inputs_embeds''')
    def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[Any]:
        """Skipped: the model takes pixel values, not input embeddings."""
        pass

    @unittest.skip(reason='''EfficientFormer does not support input and output embeddings''')
    def SCREAMING_SNAKE_CASE ( self : str) ->int:
        """Skipped: no input/output embedding accessors on this model."""
        pass

    def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]:
        """The model call signature must start with ``pixel_values``."""
        A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            A__ = model_class(UpperCAmelCase__)
            A__ = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            A__ = [*signature.parameters.keys()]

            A__ = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , UpperCAmelCase__)

    def SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[Any]:
        """Hidden-states count/shape, requested via kwarg and via config."""

        def check_hidden_states_output(UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict):
            A__ = model_class(UpperCAmelCase__)
            A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__)
            A__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            # Default expectation: one hidden state per layer plus the embeddings.
            A__ = getattr(
                self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1)
            self.assertEqual(len(UpperCAmelCase__) , UpperCAmelCase__)

            if hasattr(self.model_tester , '''encoder_seq_length'''):
                A__ = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester , '''chunk_length''') and self.model_tester.chunk_length > 1:
                    A__ = seq_length * self.model_tester.chunk_length
            else:
                A__ = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[-1].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )

            if config.is_encoder_decoder:
                A__ = outputs.decoder_hidden_states

                # NOTE(review): ``asseretIsInstance`` is a typo for
                # ``assertIsInstance``; this branch only runs when
                # config.is_encoder_decoder is true.
                self.asseretIsInstance(UpperCAmelCase__ , (list, tuple))
                self.assertEqual(len(UpperCAmelCase__) , UpperCAmelCase__)
                A__ = getattr(self.model_tester , '''seq_length''' , UpperCAmelCase__)
                A__ = getattr(self.model_tester , '''decoder_seq_length''' , UpperCAmelCase__)

                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:]) , [decoder_seq_length, self.model_tester.hidden_size] , )

        A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            A__ = True
            check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            A__ = True

            check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)

    def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Any=False) ->Tuple:
        """Drop labels for the with-teacher head, which accepts none."""
        A__ = super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__)

        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def SCREAMING_SNAKE_CASE ( self : List[str]) ->str:
        """Base model forward pass via the tester."""
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCAmelCase__)

    @unittest.skip(reason='''EfficientFormer does not implement masked image modeling yet''')
    def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Any:
        """Skipped: masked image modeling head is not implemented."""
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase__)

    def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Optional[int]:
        """Image-classification head via the tester."""
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__)

    @slow
    def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[int]:
        """Loading the first pretrained checkpoint returns a model instance."""
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            A__ = TFEfficientFormerModel.from_pretrained(UpperCAmelCase__)
            self.assertIsNotNone(UpperCAmelCase__)

    def SCREAMING_SNAKE_CASE ( self : int) ->Optional[Any]:
        """Attention outputs: count and per-head shape, via kwarg and config."""
        A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
        A__ = True

        A__ = getattr(self.model_tester , '''seq_length''' , UpperCAmelCase__)
        A__ = getattr(self.model_tester , '''encoder_seq_length''' , UpperCAmelCase__)
        A__ = getattr(self.model_tester , '''key_length''' , UpperCAmelCase__)
        A__ = getattr(self.model_tester , '''chunk_length''' , UpperCAmelCase__)

        if chunk_length is not None and hasattr(self.model_tester , '''num_hashes'''):
            A__ = encoder_seq_length * self.model_tester.num_hashes

        for model_class in self.all_model_classes:
            A__ = True
            A__ = False
            A__ = True
            A__ = model_class(UpperCAmelCase__)
            A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__)
            A__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(UpperCAmelCase__) , self.model_tester.num_attention_outputs)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            A__ = True
            A__ = model_class(UpperCAmelCase__)
            A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__)
            A__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(UpperCAmelCase__) , self.model_tester.num_attention_outputs)

            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:]) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )

    def SCREAMING_SNAKE_CASE ( self : Dict) ->str:
        """Models must build from fully dynamic (None-dimension) Keras inputs."""
        A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            # Prepare our model
            A__ = model_class(UpperCAmelCase__)
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            A__ = {
                key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=UpperCAmelCase__)
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            A__ = model(UpperCAmelCase__)
            self.assertTrue(outputs_dict is not None)
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
    """Load the COCO cats fixture image used by the integration tests below."""
    return Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
@require_tf
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
    """Slow integration tests against the pretrained efficientformer-l1-300 checkpoint.

    NOTE(review): the property and both test methods were collapsed to one
    shared name (``SCREAMING_SNAKE_CASE``) by an automated rename, so only the
    last definition survives and the ``self.default_image_processor`` lookup
    below has no matching attribute — confirm names against the upstream test.
    """

    @cached_property
    def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
        """Image processor for the checkpoint (None when vision extras are absent)."""
        return (
            EfficientFormerImageProcessor.from_pretrained('''snap-research/efficientformer-l1-300''')
            if is_vision_available()
            else None
        )

    @slow
    def SCREAMING_SNAKE_CASE ( self : str) ->Union[str, Any]:
        """Classification head: check logits shape and first three values."""
        A__ = TFEfficientFormerForImageClassification.from_pretrained('''snap-research/efficientformer-l1-300''')
        A__ = self.default_image_processor
        A__ = prepare_img()
        A__ = image_processor(images=UpperCAmelCase__ , return_tensors='''tf''')
        # forward pass
        A__ = model(**UpperCAmelCase__ , training=UpperCAmelCase__)
        # verify the logits
        A__ = tf.TensorShape((1, 1_000))
        self.assertEqual(outputs.logits.shape , UpperCAmelCase__)
        A__ = tf.constant([-0.0555, 0.4825, -0.0852])
        self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4))

    @slow
    def SCREAMING_SNAKE_CASE ( self : Tuple) ->List[str]:
        """Teacher-distilled head: check logits shape and first three values."""
        A__ = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            '''snap-research/efficientformer-l1-300''')
        A__ = self.default_image_processor
        A__ = prepare_img()
        A__ = image_processor(images=UpperCAmelCase__ , return_tensors='''tf''')
        # forward pass
        A__ = model(**UpperCAmelCase__ , training=UpperCAmelCase__)
        # verify the logits
        A__ = tf.TensorShape((1, 1_000))
        self.assertEqual(outputs.logits.shape , UpperCAmelCase__)
        A__ = tf.constant([-0.1312, 0.4353, -1.0499])
        self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4))
from .imports import is_rich_available

# Fail fast with an actionable message when the optional `rich` dependency is absent.
if not is_rich_available():
    raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''')

from rich.traceback import install

# Install pretty, syntax-highlighted tracebacks; locals are omitted to keep output compact.
install(show_locals=False)
| 36 | 0 |
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class UpperCAmelCase_ ( unittest.TestCase):
    """Builds Flax BERT configs and model inputs for the common model tests.

    NOTE(review): obfuscation artifacts in this block —
    * ``__init__`` declares every parameter with the same name
      ``__SCREAMING_SNAKE_CASE`` (a SyntaxError) and binds each value to the
      throwaway local ``UpperCamelCase`` instead of ``self.<name>``, while the
      other methods read ``self.batch_size`` etc.
    * all three helper methods below share the name ``_lowercase`` (only the
      last def survives a class body), yet the second calls
      ``self.prepare_config_and_inputs()``, which does not exist here.
    Restore the upstream names before relying on this tester.
    """

    def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=99 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=4 , ):
        """Store the hyper-parameters that drive config/input construction."""
        # NOTE(review): each line below should read `self.<attr> = <param>` upstream.
        UpperCamelCase : Tuple = parent
        UpperCamelCase : Any = batch_size
        UpperCamelCase : str = seq_length
        UpperCamelCase : Dict = is_training
        UpperCamelCase : Optional[int] = use_attention_mask
        UpperCamelCase : Optional[int] = use_token_type_ids
        UpperCamelCase : Tuple = use_labels
        UpperCamelCase : Optional[Any] = vocab_size
        UpperCamelCase : Optional[int] = hidden_size
        UpperCamelCase : Tuple = num_hidden_layers
        UpperCamelCase : Optional[Any] = num_attention_heads
        UpperCamelCase : Dict = intermediate_size
        UpperCamelCase : int = hidden_act
        UpperCamelCase : Optional[int] = hidden_dropout_prob
        UpperCamelCase : Any = attention_probs_dropout_prob
        UpperCamelCase : int = max_position_embeddings
        UpperCamelCase : List[str] = type_vocab_size
        UpperCamelCase : Optional[Any] = type_sequence_label_size
        UpperCamelCase : Optional[Any] = initializer_range
        UpperCamelCase : Tuple = num_choices

    def _lowercase ( self ):
        """Create a BertConfig plus random ids and optional masks (upstream: prepare_config_and_inputs)."""
        UpperCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        UpperCamelCase : List[Any] = None
        if self.use_attention_mask:
            UpperCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
        UpperCamelCase : Tuple = None
        if self.use_token_type_ids:
            UpperCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        UpperCamelCase : Any = BertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask

    def _lowercase ( self ):
        """Repackage the inputs as the dict shape the common tests expect."""
        UpperCamelCase : Tuple = self.prepare_config_and_inputs()
        UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Dict = config_and_inputs
        UpperCamelCase : str = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict

    def _lowercase ( self ):
        """Decoder variant: adds encoder hidden states and encoder attention mask."""
        UpperCamelCase : Any = self.prepare_config_and_inputs()
        UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : str = config_and_inputs
        UpperCamelCase : Optional[Any] = True
        UpperCamelCase : int = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class UpperCAmelCase_ ( _a, unittest.TestCase):
    """Flax BERT model tests driven by the common Flax model-tester mixin.

    NOTE(review): obfuscation artifacts remain — both class attributes share the
    name ``__UpperCamelCase`` (the tuple clobbers the boolean flag) and both
    methods share the name ``_lowercase``; ``FlaxBertModelTester`` is not defined
    under that name in this file (the tester class above is ``UpperCAmelCase_``).
    Restore the upstream names before relying on this class.
    """

    __UpperCamelCase : int = True
    # Model classes exercised by the common tests.  The original listed
    # FlaxBertForQuestionAnswering twice; the duplicate made every common test
    # run that class a second time for no benefit and is removed here.
    __UpperCamelCase : Union[str, Any] = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def _lowercase ( self ):
        """Instantiate the shared model tester used by the common tests."""
        # The mixin reads `self.model_tester`; the original bound the tester to a
        # throwaway local, leaving the attribute unset.
        self.model_tester = FlaxBertModelTester(self )

    @slow
    def _lowercase ( self ):
        """Smoke-test loading a pretrained checkpoint and running a forward pass."""
        model = FlaxBertModel.from_pretrained('''bert-base-cased''' )
        outputs = model(np.ones((1, 1) ) )
        self.assertIsNotNone(outputs )
| 643 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__UpperCAmelCase : Dict = logging.get_logger(__name__)
class UpperCAmelCase_ ( _a):
    """Whisper-style log-mel feature extractor: pads/truncates raw audio to a fixed
    chunk length, computes a log-mel spectrogram, and optionally applies
    zero-mean/unit-variance normalization.

    NOTE(review): obfuscation artifacts throughout this block —
    * ``__init__`` declares every parameter with the same name
      ``__SCREAMING_SNAKE_CASE`` (a SyntaxError) and binds values to the throwaway
      local ``UpperCamelCase`` instead of ``self.n_fft`` / ``self.hop_length`` /
      ``self.mel_filters`` etc., which the other methods read.
    * three methods share the name ``_lowercase``; internal call sites read
      ``self._np_extract_fbank_features`` and ``self.zero_mean_unit_var_norm``,
      which do not exist under these names.
    Restore the upstream names before relying on this extractor.
    """

    __UpperCamelCase : Any = ["input_features"]

    def __init__( self , __SCREAMING_SNAKE_CASE=80 , __SCREAMING_SNAKE_CASE=16_000 , __SCREAMING_SNAKE_CASE=160 , __SCREAMING_SNAKE_CASE=30 , __SCREAMING_SNAKE_CASE=400 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE , ):
        """Configure STFT/mel parameters and precompute the mel filter bank."""
        super().__init__(
            feature_size=__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , padding_value=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
        # NOTE(review): these should be `self.<attr>` assignments upstream.
        UpperCamelCase : List[str] = n_fft
        UpperCamelCase : Dict = hop_length
        UpperCamelCase : Dict = chunk_length
        # Total samples per fixed-length chunk, and resulting frame count.
        UpperCamelCase : List[str] = chunk_length * sampling_rate
        UpperCamelCase : Dict = self.n_samples // hop_length
        UpperCamelCase : str = sampling_rate
        # Slaney-normalized mel filter bank up to 8 kHz (Whisper convention).
        UpperCamelCase : Union[str, Any] = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__SCREAMING_SNAKE_CASE , min_frequency=0.0 , max_frequency=8_000.0 , sampling_rate=__SCREAMING_SNAKE_CASE , norm='''slaney''' , mel_scale='''slaney''' , )

    def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
        """Compute the clamped, rescaled log10 mel spectrogram of one waveform
        (upstream: ``_np_extract_fbank_features``)."""
        UpperCamelCase : List[str] = spectrogram(
            __SCREAMING_SNAKE_CASE , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='''log10''' , )
        # Drop the last frame, clamp 80 dB below the peak, then rescale.
        UpperCamelCase : int = log_spec[:, :-1]
        UpperCamelCase : int = np.maximum(__SCREAMING_SNAKE_CASE , log_spec.max() - 8.0 )
        UpperCamelCase : Any = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def _lowercase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 0.0 ):
        """Normalize each vector to zero mean / unit variance over its valid
        (unpadded) region; padded tail is filled with ``padding_value``."""
        if attention_mask is not None:
            UpperCamelCase : List[Any] = np.array(__SCREAMING_SNAKE_CASE , np.intaa )
            UpperCamelCase : Optional[Any] = []
            for vector, length in zip(__SCREAMING_SNAKE_CASE , attention_mask.sum(-1 ) ):
                UpperCamelCase : Optional[Any] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
                if length < normed_slice.shape[0]:
                    UpperCamelCase : Optional[int] = padding_value
                normed_input_values.append(__SCREAMING_SNAKE_CASE )
        else:
            UpperCamelCase : Union[str, Any] = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
        return normed_input_values

    def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "max_length" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ):
        """Featurize raw speech: validate sampling rate, batch/convert to float32,
        pad to the fixed chunk length, optionally normalize, then extract log-mel
        features and (optionally) a downsampled attention mask."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
                    f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
                    f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''' )
        # Detect batched vs single inputs; only mono audio is supported.
        UpperCamelCase : Tuple = isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
        UpperCamelCase : Union[str, Any] = is_batched_numpy or (
            isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            UpperCamelCase : List[Any] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
        elif not is_batched and not isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ):
            UpperCamelCase : int = np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa )
        elif isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            UpperCamelCase : Union[str, Any] = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            UpperCamelCase : Optional[int] = [np.asarray([raw_speech] ).T]
        UpperCamelCase : Optional[int] = BatchFeature({'''input_features''': raw_speech} )
        # convert into correct format for padding
        UpperCamelCase : Optional[Any] = self.pad(
            __SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , max_length=max_length if max_length else self.n_samples , truncation=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_attention_mask=return_attention_mask or do_normalize , )
        # zero-mean and unit-variance normalization
        if do_normalize:
            UpperCamelCase : Optional[Any] = self.zero_mean_unit_var_norm(
                padded_inputs['''input_features'''] , attention_mask=padded_inputs['''attention_mask'''] , padding_value=self.padding_value , )
            UpperCamelCase : List[str] = np.stack(padded_inputs['''input_features'''] , axis=0 )
        # make sure list is in array format
        UpperCamelCase : Dict = padded_inputs.get('''input_features''' ).transpose(2 , 0 , 1 )
        UpperCamelCase : Tuple = [self._np_extract_fbank_features(__SCREAMING_SNAKE_CASE ) for waveform in input_features[0]]
        if isinstance(input_features[0] , __SCREAMING_SNAKE_CASE ):
            UpperCamelCase : Optional[int] = [np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in input_features]
        else:
            UpperCamelCase : Dict = input_features
        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            UpperCamelCase : Union[str, Any] = padded_inputs['''attention_mask'''][:, :: self.hop_length]
        if return_tensors is not None:
            UpperCamelCase : Dict = padded_inputs.convert_to_tensors(__SCREAMING_SNAKE_CASE )
        return padded_inputs

    def _lowercase ( self ):
        """Serialize config to a dict, dropping the large mel filter bank array
        (upstream: ``to_dict``)."""
        UpperCamelCase : List[Any] = copy.deepcopy(self.__dict__ )
        UpperCamelCase : List[str] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
| 643 | 1 |
"""simple docstring"""
from __future__ import annotations
from statistics import mean
def lowerCAmelCase_ ( arrival_time: list[int] , burst_time: list[int] , no_of_processes: int ) -> list[int]:
    """Compute per-process waiting times under non-preemptive shortest-job-first.

    Time advances one tick at a time while no process has arrived; once the
    ready set is non-empty, the process with the smallest remaining burst runs
    its full burst to completion.

    Args:
        arrival_time: arrival time of each process.
        burst_time: CPU burst length of each process.
        no_of_processes: number of processes (length of both lists).

    Returns:
        Waiting time per process, indexed like the inputs.

    NOTE(review): the obfuscated original declared all three parameters with the
    same placeholder name (a SyntaxError) and never bound the locals the body
    read; this reconstruction matches the documented algorithm and the
    positional call in the __main__ block below.
    """
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to the full burst of each process.
    for i in range(no_of_processes ):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # When processes are not completed,
    # a process whose arrival time has passed and has remaining execution time
    # is put into the ready_process; the shortest one, target_process, runs.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes ):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i )
        if len(ready_process ) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            # Non-preemptive: the chosen process runs its entire burst.
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            # Nothing has arrived yet: idle for one time unit.
            total_time += 1
    return waiting_time
def lowerCAmelCase_ ( burst_time: list[int] , no_of_processes: int , waiting_time: list[int] ) -> list[int]:
    """Derive turnaround times: turnaround = burst + waiting, per process.

    NOTE(review): parameter names reconstructed from the body's reads and the
    positional call in the __main__ block (the obfuscated original declared all
    parameters with one placeholder name, a SyntaxError).
    """
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes ):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
    # Demo: four processes, all arriving at t=0.
    print('[TEST CASE 01]')
    # NOTE(review): `calculate_waitingtime` / `calculate_turnaroundtime` are not
    # defined in this module — obfuscation renamed both functions above to
    # `lowerCAmelCase_`, and the successive `_UpperCamelCase` assignments below
    # clobber one another.  Running this block raises NameError; restore the
    # upstream names before use.
    _UpperCamelCase = 4
    _UpperCamelCase = [2, 5, 3, 7]
    _UpperCamelCase = [0, 0, 0, 0]
    _UpperCamelCase = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    _UpperCamelCase = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
    # Printing the Result
    print('PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time')
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            f'''{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'''
            f'''{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'''
        )
    print(f'''\nAverage waiting time = {mean(waiting_time):.5f}''')
    print(f'''Average turnaround time = {mean(turn_around_time):.5f}''')
| 179 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class SCREAMING_SNAKE_CASE_ :
    """Model tester that builds TF-LED configs and inputs for the common tests.

    NOTE(review): obfuscation artifacts in this block —
    * the three class attributes all share the name ``__snake_case`` (later
      assignments clobber earlier ones); upstream these are ``config_cls``,
      ``config_updates`` and ``hidden_act``.
    * ``__init__`` binds every argument to the throwaway ``__lowerCamelCase``
      instead of ``self.<name>``, while the other methods read
      ``self.batch_size`` / ``self.attention_window`` etc.
    * both helper methods share the name ``__lowercase``.
    Restore the upstream names before relying on this tester.
    """

    __snake_case : int = LEDConfig
    __snake_case : int = {}
    __snake_case : Any = """gelu"""

    def __init__( self :Optional[Any] , __lowercase :List[Any] , __lowercase :int=13 , __lowercase :Union[str, Any]=7 , __lowercase :List[Any]=True , __lowercase :List[Any]=False , __lowercase :str=99 , __lowercase :Optional[int]=32 , __lowercase :List[Any]=2 , __lowercase :str=4 , __lowercase :str=37 , __lowercase :Any=0.1 , __lowercase :List[Any]=0.1 , __lowercase :Optional[Any]=20 , __lowercase :Union[str, Any]=2 , __lowercase :str=1 , __lowercase :List[str]=0 , __lowercase :List[Any]=4 , ):
        """Store hyper-parameters and derive attention-window bookkeeping."""
        # NOTE(review): each line below should be `self.<attr> = <param>` upstream.
        __lowerCamelCase : Union[str, Any] =parent
        __lowerCamelCase : List[Any] =batch_size
        __lowerCamelCase : str =seq_length
        __lowerCamelCase : List[str] =is_training
        __lowerCamelCase : Dict =use_labels
        __lowerCamelCase : int =vocab_size
        __lowerCamelCase : Union[str, Any] =hidden_size
        __lowerCamelCase : Any =num_hidden_layers
        __lowerCamelCase : List[Any] =num_attention_heads
        __lowerCamelCase : str =intermediate_size
        __lowerCamelCase : Optional[Any] =hidden_dropout_prob
        __lowerCamelCase : Optional[Any] =attention_probs_dropout_prob
        __lowerCamelCase : Optional[Any] =max_position_embeddings
        __lowerCamelCase : Optional[Any] =eos_token_id
        __lowerCamelCase : str =pad_token_id
        __lowerCamelCase : Any =bos_token_id
        __lowerCamelCase : Union[str, Any] =attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        __lowerCamelCase : List[Any] =self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        __lowerCamelCase : Optional[Any] =(
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )

    def __lowercase ( self :Tuple ):
        """Build a config plus LED input dict, including the global attention mask
        (global attention on the final token)."""
        __lowerCamelCase : Any =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        __lowerCamelCase : List[str] =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        __lowerCamelCase : Tuple =tf.concat([input_ids, eos_tensor] , axis=1 )
        __lowerCamelCase : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __lowerCamelCase : Optional[int] =self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
        __lowerCamelCase : int =prepare_led_inputs_dict(__lowercase , __lowercase , __lowercase )
        __lowerCamelCase : Optional[Any] =tf.concat(
            [tf.zeros_like(__lowercase )[:, :-1], tf.ones_like(__lowercase )[:, -1:]] , axis=-1 , )
        __lowerCamelCase : Any =global_attention_mask
        return config, inputs_dict

    def __lowercase ( self :Union[str, Any] , __lowercase :List[str] , __lowercase :List[str] ):
        """Verify decoder outputs with cached past_key_values match a full forward
        pass on the concatenated sequence (large-input cache consistency)."""
        __lowerCamelCase : int =TFLEDModel(config=__lowercase ).get_decoder()
        __lowerCamelCase : Dict =inputs_dict['''input_ids''']
        __lowerCamelCase : Optional[Any] =input_ids[:1, :]
        __lowerCamelCase : List[Any] =inputs_dict['''attention_mask'''][:1, :]
        __lowerCamelCase : List[Any] =1
        # first forward pass
        __lowerCamelCase : List[Any] =model(__lowercase , attention_mask=__lowercase , use_cache=__lowercase )
        __lowerCamelCase , __lowerCamelCase : Any =outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        __lowerCamelCase : List[Any] =ids_tensor((self.batch_size, 3) , config.vocab_size )
        __lowerCamelCase : Union[str, Any] =tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and
        __lowerCamelCase : Dict =tf.concat([input_ids, next_tokens] , axis=-1 )
        __lowerCamelCase : int =tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        __lowerCamelCase : str =model(__lowercase , attention_mask=__lowercase )[0]
        __lowerCamelCase : List[str] =model(__lowercase , attention_mask=__lowercase , past_key_values=__lowercase )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        __lowerCamelCase : List[str] =int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        __lowerCamelCase : Dict =output_from_no_past[:, -3:, random_slice_idx]
        __lowerCamelCase : int =output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(__lowercase , __lowercase , rtol=1e-3 )
def lowerCAmelCase_ ( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , ):
    """Assemble the standard LED input dict, deriving any mask not supplied.

    Padding positions (``config.pad_token_id``) are masked out; the first decoder
    token is always attended to regardless of padding, and head masks default to
    all-ones per layer/head.

    NOTE(review): the obfuscated original declared every parameter with the same
    placeholder name (a SyntaxError) while the body read these names; call sites
    in this file pass (config, input_ids, decoder_input_ids, ...) positionally.
    `tf.inta` also looks like an obfuscation-mangled dtype (likely tf.int8/int32)
    — confirm against upstream before use.
    """
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.inta )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , unittest.TestCase ):
    """Common-mixin test class for TF-LED models.

    NOTE(review): obfuscation artifacts — the class attributes repeat the name
    ``__snake_case`` (clobbering one another; upstream they are
    ``all_model_classes`` / ``all_generative_model_classes`` /
    ``pipeline_model_mapping`` / ``is_encoder_decoder`` / ``test_pruning`` etc.),
    and several test methods share the name ``__lowercase``.  Restore the
    upstream names before relying on this class.
    """

    __snake_case : str = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    __snake_case : Tuple = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    __snake_case : Optional[Any] = (
        {
            """conversational""": TFLEDForConditionalGeneration,
            """feature-extraction""": TFLEDModel,
            """summarization""": TFLEDForConditionalGeneration,
            """text2text-generation""": TFLEDForConditionalGeneration,
            """translation""": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    __snake_case : int = True
    __snake_case : int = False
    __snake_case : Optional[int] = False
    __snake_case : Optional[int] = False

    def __lowercase ( self :Optional[int] ):
        """Instantiate the shared model tester and the config tester."""
        __lowerCamelCase : int =TFLEDModelTester(self )
        __lowerCamelCase : Any =ConfigTester(self , config_class=__lowercase )

    def __lowercase ( self :Dict ):
        """Run the standard LEDConfig sanity checks."""
        self.config_tester.run_common_tests()

    def __lowercase ( self :str ):
        """Exercise the decoder past-key-values consistency check from the tester."""
        __lowerCamelCase : Any =self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*__lowercase )

    def __lowercase ( self :Optional[Any] ):
        """Verify shapes of local and global attention outputs, with and without
        output_attentions/output_hidden_states toggled via kwargs and config."""
        __lowerCamelCase , __lowerCamelCase : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
        __lowerCamelCase : int =tf.zeros_like(inputs_dict['''attention_mask'''] )
        __lowerCamelCase : List[Any] =2
        # Mark the first `num_global_attn_indices` positions as global attention.
        __lowerCamelCase : Optional[Any] =tf.where(
            tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , )
        __lowerCamelCase : Tuple =True
        __lowerCamelCase : Optional[Any] =self.model_tester.seq_length
        __lowerCamelCase : List[str] =self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(__lowercase :Any ):
            # Decoder attention: one tensor per layer, square over the sequence.
            __lowerCamelCase : Union[str, Any] =outputs.decoder_attentions
            self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )

        def check_encoder_attentions_output(__lowercase :List[str] ):
            # Encoder emits both local attentions and global attentions.
            __lowerCamelCase : Any =[t.numpy() for t in outputs.encoder_attentions]
            __lowerCamelCase : Optional[int] =[t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
            self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )

        for model_class in self.all_model_classes:
            __lowerCamelCase : List[str] =True
            __lowerCamelCase : int =False
            __lowerCamelCase : Optional[Any] =False
            __lowerCamelCase : str =model_class(__lowercase )
            __lowerCamelCase : Any =model(self._prepare_for_class(__lowercase , __lowercase ) )
            __lowerCamelCase : int =len(__lowercase )
            self.assertEqual(config.output_hidden_states , __lowercase )
            check_encoder_attentions_output(__lowercase )
            if self.is_encoder_decoder:
                __lowerCamelCase : Optional[int] =model_class(__lowercase )
                __lowerCamelCase : Union[str, Any] =model(self._prepare_for_class(__lowercase , __lowercase ) )
                self.assertEqual(config.output_hidden_states , __lowercase )
                check_decoder_attentions_output(__lowercase )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            __lowerCamelCase : Any =True
            __lowerCamelCase : Union[str, Any] =model_class(__lowercase )
            __lowerCamelCase : Tuple =model(self._prepare_for_class(__lowercase , __lowercase ) )
            self.assertEqual(config.output_hidden_states , __lowercase )
            check_encoder_attentions_output(__lowercase )
            # Check attention is always last and order is fine
            __lowerCamelCase : Optional[Any] =True
            __lowerCamelCase : Optional[int] =True
            __lowerCamelCase : Dict =model_class(__lowercase )
            __lowerCamelCase : Union[str, Any] =model(self._prepare_for_class(__lowercase , __lowercase ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__lowercase ) )
            self.assertEqual(model.config.output_hidden_states , __lowercase )
            check_encoder_attentions_output(__lowercase )

    @unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' )
    def __lowercase ( self :int ):
        pass

    def __lowercase ( self :int ):
        # TODO: Head-masking not yet implement
        pass
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : Union[str, Any] ):
    """Wrap nested token-id lists in a constant integer tensor for the integration tests."""
    # NOTE(review): `tf.intaa` looks like an obfuscation-mangled dtype
    # (likely tf.int64) — confirm against upstream.
    return tf.constant(SCREAMING_SNAKE_CASE , dtype=tf.intaa )


# Absolute tolerance used when comparing logits against reference values.
_UpperCamelCase = 1e-4
@slow
@require_tf
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    """Slow integration tests comparing TF-LED outputs against reference slices.

    NOTE(review): obfuscation artifacts — locals are bound to the throwaway
    ``__lowerCamelCase`` while later lines read real names (``model``,
    ``output``), and the helpers ``_long_tensor`` / ``prepare_led_inputs_dict``
    are not defined under those names in this module (both were renamed to
    ``lowerCAmelCase_``).  Both methods also share the name ``__lowercase``.
    Restore the upstream names before relying on these tests.
    """

    def __lowercase ( self :List[str] ):
        """Base LED forward pass: check hidden-state shape and a reference slice."""
        __lowerCamelCase : Dict =TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led
        # change to intended input here
        __lowerCamelCase : List[str] =_long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
        __lowerCamelCase : Dict =_long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
        __lowerCamelCase : Optional[int] =prepare_led_inputs_dict(model.config , __lowercase , __lowercase )
        __lowerCamelCase : Dict =model(**__lowercase )[0]
        __lowerCamelCase : Optional[int] =(1, 1024, 768)
        self.assertEqual(output.shape , __lowercase )
        # change to expected output here
        __lowerCamelCase : Dict =tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , )
        tf.debugging.assert_near(output[:, :3, :3] , __lowercase , atol=1e-3 )

    def __lowercase ( self :Tuple ):
        """LM-head forward pass: check logits shape and a reference slice."""
        __lowerCamelCase : Any =TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' )
        # change to intended input here
        __lowerCamelCase : Union[str, Any] =_long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
        __lowerCamelCase : Any =_long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
        __lowerCamelCase : Tuple =prepare_led_inputs_dict(model.config , __lowercase , __lowercase )
        __lowerCamelCase : List[Any] =model(**__lowercase )[0]
        __lowerCamelCase : str =(1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape , __lowercase )
        # change to expected output here
        __lowerCamelCase : List[str] =tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , )
        tf.debugging.assert_near(output[:, :3, :3] , __lowercase , atol=1e-3 , rtol=1e-3 )
| 179 | 1 |
'''simple docstring'''
import math
import sys
def UpperCAmelCase__ ( number: int ) -> int:
    """Return the minimum count of perfect squares that sum to ``number``.

    Dynamic programming over 1..number: ``answers[i] = 1 + min(answers[i - j*j])``
    over all squares ``j*j <= i`` (Lagrange's theorem bounds the result by 4).

    Args:
        number: a non-negative integer.

    Returns:
        Minimum number of perfect squares summing to ``number``.

    Raises:
        ValueError: if ``number`` is not integral or is negative.
    """
    if number != int(number ):
        raise ValueError("""the value of input must be a natural number""" )
    if number < 0:
        raise ValueError("""the value of input must not be a negative number""" )
    if number == 0:
        # Kept for backward compatibility: the original treats 0 as "one square" (0**2).
        return 1
    answers = [-1] * (number + 1)
    # Base case: 0 needs no squares.  The obfuscated original dropped this
    # subscript assignment (leaving answers[0] == -1), which corrupted the DP.
    answers[0] = 0
    for i in range(1, number + 1 ):
        answer = sys.maxsize
        root = int(math.sqrt(i ) )
        for j in range(1, root + 1 ):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer )
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 714 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class A__ ( _snake_case ):
    """Output container for the VAE encoder/decoder forward pass.

    NOTE(review): obfuscation reduced the single field to ``lowercase = 42``;
    upstream this is an annotated tensor field (e.g. ``sample: torch.FloatTensor``)
    — confirm before use.
    """

    lowercase = 42
class A__ ( nn.Module ):
    """VAE-style encoder: conv-in, a stack of down-blocks, a mid block with
    attention, then group-norm / SiLU / conv-out (doubling channels if a
    mean+logvar latent is produced).

    NOTE(review): obfuscation artifacts — ``__init__`` binds module attributes to
    the throwaway local ``A_`` instead of ``self.<name>`` (e.g.
    ``self.layers_per_block``, ``self.conv_in``, ``self.down_blocks``,
    ``self.mid_block``), yet both ``__init__`` and ``forward`` read those
    ``self`` attributes.  Restore the upstream assignments before use.
    """

    def __init__( self , UpperCamelCase__=3 , UpperCamelCase__=3 , UpperCamelCase__=("DownEncoderBlock2D",) , UpperCamelCase__=(64,) , UpperCamelCase__=2 , UpperCamelCase__=32 , UpperCamelCase__="silu" , UpperCamelCase__=True , ) -> Union[str, Any]:
        """Build the conv-in, down blocks, mid block and output head."""
        super().__init__()
        A_ = layers_per_block
        A_ = torch.nn.Convad(
            UpperCamelCase__ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
        A_ = None
        A_ = nn.ModuleList([] )
        # down
        A_ = block_out_channels[0]
        for i, down_block_type in enumerate(UpperCamelCase__ ):
            A_ = output_channel
            A_ = block_out_channels[i]
            # The final down block does not downsample.
            A_ = i == len(UpperCamelCase__ ) - 1
            A_ = get_down_block(
                UpperCamelCase__ , num_layers=self.layers_per_block , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=UpperCamelCase__ , resnet_groups=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
            self.down_blocks.append(UpperCamelCase__ )
        # mid
        A_ = UNetMidBlockaD(
            in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=UpperCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
        # out
        A_ = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=UpperCamelCase__ , eps=1e-6 )
        A_ = nn.SiLU()
        # Double the output channels when emitting mean+logvar for a VAE latent.
        A_ = 2 * out_channels if double_z else out_channels
        A_ = nn.Convad(block_out_channels[-1] , UpperCamelCase__ , 3 , padding=1 )
        A_ = False

    def snake_case_ ( self , UpperCamelCase__ ) -> str:
        """Encode an image tensor to the latent representation.

        Uses gradient checkpointing during training when enabled; the
        torch>=1.11 branch passes ``use_reentrant`` to the checkpoint API.
        """
        A_ = x
        A_ = self.conv_in(UpperCamelCase__ )
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(UpperCamelCase__ ):
                # Wrap a module so torch.utils.checkpoint can re-run it on backward.
                def custom_forward(*UpperCamelCase__ ):
                    return module(*UpperCamelCase__ )

                return custom_forward

            # down
            if is_torch_version(""">=""" , """1.11.0""" ):
                for down_block in self.down_blocks:
                    A_ = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
                # middle
                A_ = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
            else:
                for down_block in self.down_blocks:
                    A_ = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ )
                # middle
                A_ = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , UpperCamelCase__ )
        else:
            # down
            for down_block in self.down_blocks:
                A_ = down_block(UpperCamelCase__ )
            # middle
            A_ = self.mid_block(UpperCamelCase__ )
        # post-process
        A_ = self.conv_norm_out(UpperCamelCase__ )
        A_ = self.conv_act(UpperCamelCase__ )
        A_ = self.conv_out(UpperCamelCase__ )
        return sample
class A__ ( nn.Module ):
def __init__( self , UpperCamelCase__=3 , UpperCamelCase__=3 , UpperCamelCase__=("UpDecoderBlock2D",) , UpperCamelCase__=(64,) , UpperCamelCase__=2 , UpperCamelCase__=32 , UpperCamelCase__="silu" , UpperCamelCase__="group" , ) -> List[Any]:
'''simple docstring'''
super().__init__()
A_ = layers_per_block
A_ = nn.Convad(
UpperCamelCase__ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
A_ = None
A_ = nn.ModuleList([] )
A_ = in_channels if norm_type == """spatial""" else None
# mid
A_ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=UpperCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
# up
A_ = list(reversed(UpperCamelCase__ ) )
A_ = reversed_block_out_channels[0]
for i, up_block_type in enumerate(UpperCamelCase__ ):
A_ = output_channel
A_ = reversed_block_out_channels[i]
A_ = i == len(UpperCamelCase__ ) - 1
A_ = get_up_block(
UpperCamelCase__ , num_layers=self.layers_per_block + 1 , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , prev_output_channel=UpperCamelCase__ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=UpperCamelCase__ , resnet_groups=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , temb_channels=UpperCamelCase__ , resnet_time_scale_shift=UpperCamelCase__ , )
self.up_blocks.append(UpperCamelCase__ )
A_ = output_channel
# out
if norm_type == "spatial":
A_ = SpatialNorm(block_out_channels[0] , UpperCamelCase__ )
else:
A_ = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=UpperCamelCase__ , eps=1e-6 )
A_ = nn.SiLU()
A_ = nn.Convad(block_out_channels[0] , UpperCamelCase__ , 3 , padding=1 )
A_ = False
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=None ) -> Optional[Any]:
'''simple docstring'''
A_ = z
A_ = self.conv_in(UpperCamelCase__ )
A_ = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCamelCase__ ):
def custom_forward(*UpperCamelCase__ ):
return module(*UpperCamelCase__ )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
A_ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
else:
# middle
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ )
A_ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
A_ = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ )
else:
# middle
A_ = self.mid_block(UpperCamelCase__ , UpperCamelCase__ )
A_ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
A_ = up_block(UpperCamelCase__ , UpperCamelCase__ )
# post-process
if latent_embeds is None:
A_ = self.conv_norm_out(UpperCamelCase__ )
else:
A_ = self.conv_norm_out(UpperCamelCase__ , UpperCamelCase__ )
A_ = self.conv_act(UpperCamelCase__ )
A_ = self.conv_out(UpperCamelCase__ )
return sample
class A__ ( nn.Module ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__="random" , UpperCamelCase__=False , UpperCamelCase__=True ) -> str:
'''simple docstring'''
super().__init__()
A_ = n_e
A_ = vq_embed_dim
A_ = beta
A_ = legacy
A_ = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
A_ = remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
A_ = self.used.shape[0]
A_ = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
A_ = self.re_embed
A_ = self.re_embed + 1
print(
f'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
f'''Using {self.unknown_index} for unknown indices.''' )
else:
A_ = n_e
A_ = sane_index_shape
def snake_case_ ( self , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
A_ = inds.shape
assert len(UpperCamelCase__ ) > 1
A_ = inds.reshape(ishape[0] , -1 )
A_ = self.used.to(UpperCamelCase__ )
A_ = (inds[:, :, None] == used[None, None, ...]).long()
A_ = match.argmax(-1 )
A_ = match.sum(2 ) < 1
if self.unknown_index == "random":
A_ = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
A_ = self.unknown_index
return new.reshape(UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
A_ = inds.shape
assert len(UpperCamelCase__ ) > 1
A_ = inds.reshape(ishape[0] , -1 )
A_ = self.used.to(UpperCamelCase__ )
if self.re_embed > self.used.shape[0]: # extra token
A_ = 0 # simply set to zero
A_ = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , UpperCamelCase__ )
return back.reshape(UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> str:
'''simple docstring'''
# reshape z -> (batch, height, width, channel) and flatten
A_ = z.permute(0 , 2 , 3 , 1 ).contiguous()
A_ = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
A_ = torch.argmin(torch.cdist(UpperCamelCase__ , self.embedding.weight ) , dim=1 )
A_ = self.embedding(UpperCamelCase__ ).view(z.shape )
A_ = None
A_ = None
# compute loss for embedding
if not self.legacy:
A_ = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
A_ = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
A_ = z + (z_q - z).detach()
# reshape back to match original input shape
A_ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
A_ = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
A_ = self.remap_to_used(UpperCamelCase__ )
A_ = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
A_ = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
# shape specifying (batch, height, width, channel)
if self.remap is not None:
A_ = indices.reshape(shape[0] , -1 ) # add batch axis
A_ = self.unmap_to_all(UpperCamelCase__ )
A_ = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
A_ = self.embedding(UpperCamelCase__ )
if shape is not None:
A_ = z_q.view(UpperCamelCase__ )
# reshape back to match original input shape
A_ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class A__ ( _snake_case ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__=False ) -> Dict:
'''simple docstring'''
A_ = parameters
A_ , A_ = torch.chunk(UpperCamelCase__ , 2 , dim=1 )
A_ = torch.clamp(self.logvar , -30.0 , 20.0 )
A_ = deterministic
A_ = torch.exp(0.5 * self.logvar )
A_ = torch.exp(self.logvar )
if self.deterministic:
A_ = A_ = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def snake_case_ ( self , UpperCamelCase__ = None ) -> torch.FloatTensor:
'''simple docstring'''
# make sure sample is on the same device as the parameters and has same dtype
A_ = randn_tensor(
self.mean.shape , generator=UpperCamelCase__ , device=self.parameters.device , dtype=self.parameters.dtype )
A_ = self.mean + self.std * sample
return x
def snake_case_ ( self , UpperCamelCase__=None ) -> int:
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=[1, 2, 3] ) -> Optional[Any]:
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
A_ = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=UpperCamelCase__ )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
return self.mean
| 667 | 0 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
a_ :Union[str, Any] = logging.get_logger(__name__)
a_ :Union[str, Any] = OrderedDict(
[
('audio-spectrogram-transformer', 'ASTFeatureExtractor'),
('beit', 'BeitFeatureExtractor'),
('chinese_clip', 'ChineseCLIPFeatureExtractor'),
('clap', 'ClapFeatureExtractor'),
('clip', 'CLIPFeatureExtractor'),
('clipseg', 'ViTFeatureExtractor'),
('conditional_detr', 'ConditionalDetrFeatureExtractor'),
('convnext', 'ConvNextFeatureExtractor'),
('cvt', 'ConvNextFeatureExtractor'),
('data2vec-audio', 'Wav2Vec2FeatureExtractor'),
('data2vec-vision', 'BeitFeatureExtractor'),
('deformable_detr', 'DeformableDetrFeatureExtractor'),
('deit', 'DeiTFeatureExtractor'),
('detr', 'DetrFeatureExtractor'),
('dinat', 'ViTFeatureExtractor'),
('donut-swin', 'DonutFeatureExtractor'),
('dpt', 'DPTFeatureExtractor'),
('encodec', 'EncodecFeatureExtractor'),
('flava', 'FlavaFeatureExtractor'),
('glpn', 'GLPNFeatureExtractor'),
('groupvit', 'CLIPFeatureExtractor'),
('hubert', 'Wav2Vec2FeatureExtractor'),
('imagegpt', 'ImageGPTFeatureExtractor'),
('layoutlmv2', 'LayoutLMv2FeatureExtractor'),
('layoutlmv3', 'LayoutLMv3FeatureExtractor'),
('levit', 'LevitFeatureExtractor'),
('maskformer', 'MaskFormerFeatureExtractor'),
('mctct', 'MCTCTFeatureExtractor'),
('mobilenet_v1', 'MobileNetV1FeatureExtractor'),
('mobilenet_v2', 'MobileNetV2FeatureExtractor'),
('mobilevit', 'MobileViTFeatureExtractor'),
('nat', 'ViTFeatureExtractor'),
('owlvit', 'OwlViTFeatureExtractor'),
('perceiver', 'PerceiverFeatureExtractor'),
('poolformer', 'PoolFormerFeatureExtractor'),
('regnet', 'ConvNextFeatureExtractor'),
('resnet', 'ConvNextFeatureExtractor'),
('segformer', 'SegformerFeatureExtractor'),
('sew', 'Wav2Vec2FeatureExtractor'),
('sew-d', 'Wav2Vec2FeatureExtractor'),
('speech_to_text', 'Speech2TextFeatureExtractor'),
('speecht5', 'SpeechT5FeatureExtractor'),
('swiftformer', 'ViTFeatureExtractor'),
('swin', 'ViTFeatureExtractor'),
('swinv2', 'ViTFeatureExtractor'),
('table-transformer', 'DetrFeatureExtractor'),
('timesformer', 'VideoMAEFeatureExtractor'),
('tvlt', 'TvltFeatureExtractor'),
('unispeech', 'Wav2Vec2FeatureExtractor'),
('unispeech-sat', 'Wav2Vec2FeatureExtractor'),
('van', 'ConvNextFeatureExtractor'),
('videomae', 'VideoMAEFeatureExtractor'),
('vilt', 'ViltFeatureExtractor'),
('vit', 'ViTFeatureExtractor'),
('vit_mae', 'ViTFeatureExtractor'),
('vit_msn', 'ViTFeatureExtractor'),
('wav2vec2', 'Wav2Vec2FeatureExtractor'),
('wav2vec2-conformer', 'Wav2Vec2FeatureExtractor'),
('wavlm', 'Wav2Vec2FeatureExtractor'),
('whisper', 'WhisperFeatureExtractor'),
('xclip', 'CLIPFeatureExtractor'),
('yolos', 'YolosFeatureExtractor'),
]
)
a_ :int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def a ( A__ ) -> Dict:
'''simple docstring'''
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model_type_to_module_name(A__ )
SCREAMING_SNAKE_CASE__ : str = importlib.import_module(f""".{module_name}""" , '''transformers.models''' )
try:
return getattr(A__ , A__ )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(A__ , '''__name__''' , A__ ) == class_name:
return extractor
# We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
SCREAMING_SNAKE_CASE__ : List[str] = importlib.import_module('''transformers''' )
if hasattr(A__ , A__ ):
return getattr(A__ , A__ )
return None
def a ( A__ , A__ = None , A__ = False , A__ = False , A__ = None , A__ = None , A__ = None , A__ = False , **A__ , ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = get_file_from_repo(
A__ , A__ , cache_dir=A__ , force_download=A__ , resume_download=A__ , proxies=A__ , use_auth_token=A__ , revision=A__ , local_files_only=A__ , )
if resolved_config_file is None:
logger.info(
'''Could not locate the feature extractor configuration file, will try to use the model config instead.''' )
return {}
with open(A__ , encoding='''utf-8''' ) as reader:
return json.load(A__ )
class lowercase :
def __init__( self : Any ):
raise EnvironmentError(
'''AutoFeatureExtractor is designed to be instantiated '''
'''using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.''' )
@classmethod
@replace_list_option_in_docstrings(_lowercase )
def lowercase__ ( cls : List[str] , _lowercase : Optional[int] , **_lowercase : Tuple ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = kwargs.pop('''config''' , _lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] = kwargs.pop('''trust_remote_code''' , _lowercase )
SCREAMING_SNAKE_CASE__ : Any = True
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = FeatureExtractionMixin.get_feature_extractor_dict(_lowercase , **_lowercase )
SCREAMING_SNAKE_CASE__ : Any = config_dict.get('''feature_extractor_type''' , _lowercase )
SCREAMING_SNAKE_CASE__ : Any = None
if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
SCREAMING_SNAKE_CASE__ : List[Any] = config_dict['''auto_map''']['''AutoFeatureExtractor''']
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(_lowercase , _lowercase ):
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoConfig.from_pretrained(_lowercase , **_lowercase )
# It could be in `config.feature_extractor_type``
SCREAMING_SNAKE_CASE__ : List[str] = getattr(_lowercase , '''feature_extractor_type''' , _lowercase )
if hasattr(_lowercase , '''auto_map''' ) and "AutoFeatureExtractor" in config.auto_map:
SCREAMING_SNAKE_CASE__ : Dict = config.auto_map['''AutoFeatureExtractor''']
if feature_extractor_class is not None:
SCREAMING_SNAKE_CASE__ : Any = feature_extractor_class_from_name(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = feature_extractor_auto_map is not None
SCREAMING_SNAKE_CASE__ : List[Any] = feature_extractor_class is not None or type(_lowercase ) in FEATURE_EXTRACTOR_MAPPING
SCREAMING_SNAKE_CASE__ : Dict = resolve_trust_remote_code(
_lowercase , _lowercase , _lowercase , _lowercase )
if has_remote_code and trust_remote_code:
SCREAMING_SNAKE_CASE__ : List[Any] = get_class_from_dynamic_module(
_lowercase , _lowercase , **_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = kwargs.pop('''code_revision''' , _lowercase )
if os.path.isdir(_lowercase ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(_lowercase , **_lowercase )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(_lowercase , **_lowercase )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(_lowercase ) in FEATURE_EXTRACTOR_MAPPING:
SCREAMING_SNAKE_CASE__ : Tuple = FEATURE_EXTRACTOR_MAPPING[type(_lowercase )]
return feature_extractor_class.from_dict(_lowercase , **_lowercase )
raise ValueError(
f"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """
f"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """
f"""`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}""" )
@staticmethod
def lowercase__ ( _lowercase : Dict , _lowercase : Optional[Any] ):
FEATURE_EXTRACTOR_MAPPING.register(_lowercase , _lowercase )
| 35 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
a_ :List[Any] = logging.getLogger(__name__)
@dataclass
class lowercase :
lowerCamelCase : str
lowerCamelCase : List[str]
lowerCamelCase : Optional[List[str]]
@dataclass
class lowercase :
lowerCamelCase : List[int]
lowerCamelCase : List[int]
lowerCamelCase : Optional[List[int]] = None
lowerCamelCase : Optional[List[int]] = None
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Optional[Any] = '''train'''
lowerCamelCase : Tuple = '''dev'''
lowerCamelCase : Any = '''test'''
class lowercase :
@staticmethod
def lowercase__ ( _lowercase : Any , _lowercase : Union[Split, str] ):
raise NotImplementedError
@staticmethod
def lowercase__ ( _lowercase : str ):
raise NotImplementedError
@staticmethod
def lowercase__ ( _lowercase : List[InputExample] , _lowercase : List[str] , _lowercase : int , _lowercase : PreTrainedTokenizer , _lowercase : int=False , _lowercase : Optional[Any]="[CLS]" , _lowercase : Tuple=1 , _lowercase : Optional[Any]="[SEP]" , _lowercase : Tuple=False , _lowercase : Optional[Any]=False , _lowercase : List[Any]=0 , _lowercase : Optional[int]=0 , _lowercase : Optional[Any]=-1_00 , _lowercase : Tuple=0 , _lowercase : Union[str, Any]=True , ):
SCREAMING_SNAKE_CASE__ : Tuple = {label: i for i, label in enumerate(_lowercase )}
SCREAMING_SNAKE_CASE__ : Dict = []
for ex_index, example in enumerate(_lowercase ):
if ex_index % 1_00_00 == 0:
logger.info('''Writing example %d of %d''' , _lowercase , len(_lowercase ) )
SCREAMING_SNAKE_CASE__ : Tuple = []
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for word, label in zip(example.words , example.labels ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.tokenize(_lowercase )
# bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
if len(_lowercase ) > 0:
tokens.extend(_lowercase )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(_lowercase ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.num_special_tokens_to_add()
if len(_lowercase ) > max_seq_length - special_tokens_count:
SCREAMING_SNAKE_CASE__ : List[str] = tokens[: (max_seq_length - special_tokens_count)]
SCREAMING_SNAKE_CASE__ : Any = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
SCREAMING_SNAKE_CASE__ : Optional[int] = [sequence_a_segment_id] * len(_lowercase )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [cls_token] + tokens
SCREAMING_SNAKE_CASE__ : Tuple = [pad_token_label_id] + label_ids
SCREAMING_SNAKE_CASE__ : Tuple = [cls_token_segment_id] + segment_ids
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.convert_tokens_to_ids(_lowercase )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
SCREAMING_SNAKE_CASE__ : str = [1 if mask_padding_with_zero else 0] * len(_lowercase )
# Zero-pad up to the sequence length.
SCREAMING_SNAKE_CASE__ : List[str] = max_seq_length - len(_lowercase )
if pad_on_left:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ([pad_token] * padding_length) + input_ids
SCREAMING_SNAKE_CASE__ : str = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
SCREAMING_SNAKE_CASE__ : Tuple = ([pad_token_segment_id] * padding_length) + segment_ids
SCREAMING_SNAKE_CASE__ : int = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
if ex_index < 5:
logger.info('''*** Example ***''' )
logger.info('''guid: %s''' , example.guid )
logger.info('''tokens: %s''' , ''' '''.join([str(_lowercase ) for x in tokens] ) )
logger.info('''input_ids: %s''' , ''' '''.join([str(_lowercase ) for x in input_ids] ) )
logger.info('''input_mask: %s''' , ''' '''.join([str(_lowercase ) for x in input_mask] ) )
logger.info('''segment_ids: %s''' , ''' '''.join([str(_lowercase ) for x in segment_ids] ) )
logger.info('''label_ids: %s''' , ''' '''.join([str(_lowercase ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
SCREAMING_SNAKE_CASE__ : List[Any] = None
features.append(
InputFeatures(
input_ids=_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , label_ids=_lowercase ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : List[InputFeatures]
lowerCamelCase : int = nn.CrossEntropyLoss().ignore_index
def __init__( self : int , _lowercase : TokenClassificationTask , _lowercase : str , _lowercase : PreTrainedTokenizer , _lowercase : List[str] , _lowercase : str , _lowercase : Optional[int] = None , _lowercase : Optional[int]=False , _lowercase : Split = Split.train , ):
# Load data features from cache or dataset file
SCREAMING_SNAKE_CASE__ : List[Any] = os.path.join(
_lowercase , '''cached_{}_{}_{}'''.format(mode.value , tokenizer.__class__.__name__ , str(_lowercase ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
SCREAMING_SNAKE_CASE__ : Optional[int] = cached_features_file + '''.lock'''
with FileLock(_lowercase ):
if os.path.exists(_lowercase ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
SCREAMING_SNAKE_CASE__ : Any = torch.load(_lowercase )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
SCREAMING_SNAKE_CASE__ : str = token_classification_task.read_examples_from_file(_lowercase , _lowercase )
# TODO clean up all this to leverage built-in features of tokenizers
SCREAMING_SNAKE_CASE__ : Any = token_classification_task.convert_examples_to_features(
_lowercase , _lowercase , _lowercase , _lowercase , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_lowercase , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(f"""Saving features into cached file {cached_features_file}""" )
torch.save(self.features , _lowercase )
def __len__( self : Tuple ):
return len(self.features )
def __getitem__( self : Optional[int] , _lowercase : List[str] ):
return self.features[i]
if is_tf_available():
import tensorflow as tf
class lowercase :
lowerCamelCase : List[InputFeatures]
lowerCamelCase : int = -100
def __init__( self : int , _lowercase : TokenClassificationTask , _lowercase : str , _lowercase : PreTrainedTokenizer , _lowercase : List[str] , _lowercase : str , _lowercase : Optional[int] = None , _lowercase : List[str]=False , _lowercase : Split = Split.train , ):
SCREAMING_SNAKE_CASE__ : Optional[int] = token_classification_task.read_examples_from_file(_lowercase , _lowercase )
# TODO clean up all this to leverage built-in features of tokenizers
SCREAMING_SNAKE_CASE__ : List[str] = token_classification_task.convert_examples_to_features(
_lowercase , _lowercase , _lowercase , _lowercase , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_lowercase , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
SCREAMING_SNAKE_CASE__ : int = tf.data.Dataset.from_generator(
_lowercase , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa}, tf.intaa) , (
{'''input_ids''': tf.TensorShape([None] ), '''attention_mask''': tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
SCREAMING_SNAKE_CASE__ : int = tf.data.Dataset.from_generator(
_lowercase , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa}, tf.intaa) , (
{
'''input_ids''': tf.TensorShape([None] ),
'''attention_mask''': tf.TensorShape([None] ),
'''token_type_ids''': tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Optional[int] = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self : Dict ):
return len(self.features )
def __getitem__( self : Optional[Any] , _lowercase : Union[str, Any] ):
return self.features[i]
| 35 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"SCUT-DLVCLab/lilt-roberta-en-base": (
"https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
),
}
class __A ( A ):
'''simple docstring'''
__lowerCamelCase : List[Any] = 'lilt'
def __init__(self , A=30_522 , A=768 , A=12 , A=12 , A=3_072 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=2 , A=0.02 , A=1E-12 , A=0 , A="absolute" , A=None , A=4 , A=1_024 , **A , ) -> str:
"""simple docstring"""
super().__init__(pad_token_id=A , **A )
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = hidden_act
_a = intermediate_size
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_size
_a = initializer_range
_a = layer_norm_eps
_a = position_embedding_type
_a = classifier_dropout
_a = channel_shrink_ratio
_a = max_ad_position_embeddings
| 708 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
lowercase_ = get_tests_dir("fixtures")
class __A ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ) -> List[Any]:
"""simple docstring"""
_a = mock.Mock()
_a = 500
_a = {}
_a = HTTPError
_a = {}
# Download this model to make sure it's in the cache.
_a = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=A ) as mock_head:
_a = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
# This check we did call the fake head request
mock_head.assert_called()
def a__ (self ) -> str:
"""simple docstring"""
_a = ViTImageProcessor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''' )
def a__ (self ) -> Dict:
"""simple docstring"""
with self.assertRaises(A ):
# config is in subfolder, the following should not work without specifying the subfolder
_a = AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''' )
_a = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/stable-diffusion-all-variants''' , subfolder='''feature_extractor''' )
self.assertIsNotNone(A )
@is_staging_test
class __A ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def a__ (cls ) -> Any:
"""simple docstring"""
_a = TOKEN
HfFolder.save_token(A )
@classmethod
def a__ (cls ) -> Union[str, Any]:
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='''test-image-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-image-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-image-processor''' )
except HTTPError:
pass
def a__ (self ) -> str:
"""simple docstring"""
_a = ViTImageProcessor.from_pretrained(A )
image_processor.push_to_hub('''test-image-processor''' , use_auth_token=self._token )
_a = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-image-processor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
A , repo_id='''test-image-processor''' , push_to_hub=A , use_auth_token=self._token )
_a = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(A , getattr(A , A ) )
def a__ (self ) -> List[Any]:
"""simple docstring"""
_a = ViTImageProcessor.from_pretrained(A )
image_processor.push_to_hub('''valid_org/test-image-processor''' , use_auth_token=self._token )
_a = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-image-processor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
A , repo_id='''valid_org/test-image-processor-org''' , push_to_hub=A , use_auth_token=self._token )
_a = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(A , getattr(A , A ) )
    def a__ (self ) -> Union[str, Any]:
        """Push a custom (dynamic-module) image processor and verify the
        ``auto_map`` entry lets ``AutoImageProcessor`` resolve it with
        ``trust_remote_code``.

        NOTE(review): ``_a`` is assigned but read back as ``image_processor``
        / ``new_image_processor`` — mangled local names; verify against the
        upstream test.
        """
        CustomImageProcessor.register_for_auto_class()
        _a = CustomImageProcessor.from_pretrained(A )
        image_processor.push_to_hub('''test-dynamic-image-processor''' , use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map , {'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''} , )
        _a = AutoImageProcessor.from_pretrained(
            f'''{USER}/test-dynamic-image-processor''' , trust_remote_code=A )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__ , '''CustomImageProcessor''' )
| 352 | 0 |
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
UpperCamelCase_ = True
except ImportError:
UpperCamelCase_ = False
UpperCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
def SCREAMING_SNAKE_CASE ( snake_case__ ) -> Dict:
    """Factory wired into argparse: build the add-new-model command object
    from the parsed ``Namespace``.

    Fix: the original body read an undefined name ``args`` while the
    parameter was named ``snake_case__``, raising NameError at call time.
    NOTE(review): ``AddNewModelCommand`` is not defined in this module (the
    class here is the mangled ``_SCREAMING_SNAKE_CASE``) — confirm the
    intended target.
    """
    return AddNewModelCommand(snake_case__.testing , snake_case__.testing_file , path=snake_case__.path )
class _SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
    """CLI command that scaffolds a new model from the ``adding_a_new_model``
    cookiecutter template and moves the generated files into the repository
    layout (src/, tests/, docs/).

    NOTE(review): local names are mangled throughout — results are bound to
    ``__UpperCAmelCase`` but read back under their original names
    (``add_new_model_parser``, ``configuration``, ``model_dir``, ...), and
    many call arguments were collapsed to ``UpperCAmelCase``. Confirm
    against the upstream ``AddNewModelCommand`` before running.
    """
    @staticmethod
    def A__ (UpperCAmelCase):
        '''Register the ``add-new-model`` sub-parser and its flags on the given argparse parser.'''
        __UpperCAmelCase =parser.add_parser('''add-new-model''')
        add_new_model_parser.add_argument('''--testing''' , action='''store_true''' , help='''If in testing mode.''')
        add_new_model_parser.add_argument('''--testing_file''' , type=UpperCAmelCase , help='''Configuration file on which to run.''')
        add_new_model_parser.add_argument(
            '''--path''' , type=UpperCAmelCase , help='''Path to cookiecutter. Should only be used for testing purposes.''')
        add_new_model_parser.set_defaults(func=UpperCAmelCase)
    def __init__(self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , *UpperCAmelCase):
        '''Store testing mode, the testing configuration file and the optional cookiecutter path.'''
        __UpperCAmelCase =testing
        __UpperCAmelCase =testing_file
        __UpperCAmelCase =path
    def A__ (self):
        '''Run the scaffold: execute cookiecutter, then move the generated files into place.'''
        warnings.warn(
            '''The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '''
            '''It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '''
            '''checks, you should use `transformers-cli add-new-model-like` instead.''')
        if not _has_cookiecutter:
            raise ImportError(
                '''Model creation dependencies are required to use the `add_new_model` command. Install them by running '''
                '''the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n''')
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        __UpperCAmelCase =[directory for directory in os.listdir() if '''cookiecutter-template-''' == directory[:2_2]]
        if len(UpperCAmelCase) > 0:
            raise ValueError(
                '''Several directories starting with `cookiecutter-template-` in current working directory. '''
                '''Please clean your directory by removing all folders starting with `cookiecutter-template-` or '''
                '''change your working directory.''')
        # Locate the transformers repo root relative to this file (or to --path).
        __UpperCAmelCase =(
            Path(UpperCAmelCase).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        __UpperCAmelCase =path_to_transformer_root / '''templates''' / '''adding_a_new_model'''
        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(UpperCAmelCase))
        else:
            with open(self._testing_file , '''r''') as configuration_file:
                __UpperCAmelCase =json.load(UpperCAmelCase)
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path) , no_input=UpperCAmelCase , extra_context=UpperCAmelCase , )
        __UpperCAmelCase =[directory for directory in os.listdir() if '''cookiecutter-template-''' in directory[:2_2]][0]
        # Retrieve configuration
        with open(directory + '''/configuration.json''' , '''r''') as configuration_file:
            __UpperCAmelCase =json.load(UpperCAmelCase)
        __UpperCAmelCase =configuration['''lowercase_modelname''']
        __UpperCAmelCase =configuration['''generate_tensorflow_pytorch_and_flax''']
        os.remove(f"""{directory}/configuration.json""")
        # Which frameworks were requested in the cookiecutter answers.
        __UpperCAmelCase ='''PyTorch''' in generate_tensorflow_pytorch_and_flax
        __UpperCAmelCase ='''TensorFlow''' in generate_tensorflow_pytorch_and_flax
        __UpperCAmelCase ='''Flax''' in generate_tensorflow_pytorch_and_flax
        __UpperCAmelCase =f"""{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"""
        os.makedirs(UpperCAmelCase , exist_ok=UpperCAmelCase)
        os.makedirs(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}""" , exist_ok=UpperCAmelCase)
        # Tests require submodules as they have parent imports
        with open(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py""" , '''w'''):
            pass
        shutil.move(
            f"""{directory}/__init__.py""" , f"""{model_dir}/__init__.py""" , )
        shutil.move(
            f"""{directory}/configuration_{lowercase_model_name}.py""" , f"""{model_dir}/configuration_{lowercase_model_name}.py""" , )
        def remove_copy_lines(UpperCAmelCase):
            # Strip "# Copied from transformers." markers from a generated file.
            with open(UpperCAmelCase , '''r''') as f:
                __UpperCAmelCase =f.readlines()
            with open(UpperCAmelCase , '''w''') as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(UpperCAmelCase)
        if output_pytorch:
            if not self._testing:
                remove_copy_lines(f"""{directory}/modeling_{lowercase_model_name}.py""")
                shutil.move(
                    f"""{directory}/modeling_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_{lowercase_model_name}.py""" , )
                shutil.move(
                    f"""{directory}/test_modeling_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py""" , )
            else:
                os.remove(f"""{directory}/modeling_{lowercase_model_name}.py""")
                os.remove(f"""{directory}/test_modeling_{lowercase_model_name}.py""")
        if output_tensorflow:
            if not self._testing:
                remove_copy_lines(f"""{directory}/modeling_tf_{lowercase_model_name}.py""")
                shutil.move(
                    f"""{directory}/modeling_tf_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_tf_{lowercase_model_name}.py""" , )
                shutil.move(
                    f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py""" , )
            else:
                os.remove(f"""{directory}/modeling_tf_{lowercase_model_name}.py""")
                os.remove(f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""")
        if output_flax:
            if not self._testing:
                remove_copy_lines(f"""{directory}/modeling_flax_{lowercase_model_name}.py""")
                shutil.move(
                    f"""{directory}/modeling_flax_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_flax_{lowercase_model_name}.py""" , )
                shutil.move(
                    f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py""" , )
            else:
                os.remove(f"""{directory}/modeling_flax_{lowercase_model_name}.py""")
                os.remove(f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""")
        shutil.move(
            f"""{directory}/{lowercase_model_name}.md""" , f"""{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md""" , )
        shutil.move(
            f"""{directory}/tokenization_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}.py""" , )
        shutil.move(
            f"""{directory}/tokenization_fast_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}_fast.py""" , )
        from os import fdopen, remove
        from shutil import copymode, move
        from tempfile import mkstemp
        def replace(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase):
            # Copy a file to a temp file, inserting `lines_to_copy` after the
            # marker line, then atomically move it back over the original.
            # Create temp file
            __UpperCAmelCase , __UpperCAmelCase =mkstemp()
            __UpperCAmelCase =False
            with fdopen(UpperCAmelCase , '''w''') as new_file:
                with open(UpperCAmelCase) as old_file:
                    for line in old_file:
                        new_file.write(UpperCAmelCase)
                        if line_to_copy_below in line:
                            __UpperCAmelCase =True
                            for line_to_copy in lines_to_copy:
                                new_file.write(UpperCAmelCase)
            if not line_found:
                raise ValueError(f"""Line {line_to_copy_below} was not found in file.""")
            # Copy the file permissions from the old file to the new file
            copymode(UpperCAmelCase , UpperCAmelCase)
            # Remove original file
            remove(UpperCAmelCase)
            # Move new file
            move(UpperCAmelCase , UpperCAmelCase)
        def skip_units(UpperCAmelCase):
            # A snippet is skipped when it targets a framework the user did not request.
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )
        def replace_in_files(UpperCAmelCase):
            # Parse the generated to_replace_*.py file and apply each snippet
            # to the target file named in its "# To replace in:" header.
            with open(UpperCAmelCase) as datafile:
                __UpperCAmelCase =[]
                __UpperCAmelCase =False
                __UpperCAmelCase =False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        __UpperCAmelCase =line.split('''"''')[1]
                        __UpperCAmelCase =skip_units(UpperCAmelCase)
                    elif "# Below: " in line and "##" not in line:
                        __UpperCAmelCase =line.split('''"''')[1]
                        __UpperCAmelCase =skip_units(UpperCAmelCase)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase)
                        __UpperCAmelCase =[]
                    elif "# Replace with" in line and "##" not in line:
                        __UpperCAmelCase =[]
                    elif "##" not in line:
                        lines_to_copy.append(UpperCAmelCase)
            remove(UpperCAmelCase)
        replace_in_files(f"""{directory}/to_replace_{lowercase_model_name}.py""")
        os.rmdir(UpperCAmelCase)
| 132 |
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
# NOTE(review): this definition is broken by name mangling — all four
# parameters share the name ``snake_case__`` (a SyntaxError in Python) while
# the body reads ``check_program``, ``timeout``, ``task_id`` and
# ``completion_id``, and the worker passed as ``target=`` cannot be resolved
# (the sibling function below carries the same mangled name). Restore the
# upstream ``check_correctness`` signature before use.
def SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> List[str]:
    # Run the untrusted program in a subprocess; the shared manager list
    # collects "passed" / "timed out" / "failed: ..." from the worker.
    __UpperCAmelCase =multiprocessing.Manager()
    __UpperCAmelCase =manager.list()
    __UpperCAmelCase =multiprocessing.Process(target=snake_case__ , args=(check_program, result, timeout) )
    p.start()
    # Give the worker slightly longer than its internal time limit.
    p.join(timeout=timeout + 1 )
    if p.is_alive():
        p.kill()
    if not result:
        result.append('''timed out''' )
    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
# NOTE(review): broken by name mangling — the three parameters share the name
# ``snake_case__`` (a SyntaxError) while the body reads ``check_program``,
# ``result`` and ``timeout``; the helpers it calls (``create_tempdir``,
# ``reliability_guard``, ``swallow_io``, ``time_limit``, ``TimeoutException``)
# are referenced by their upstream names but defined under mangled names in
# this file.
def SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ , snake_case__ ) -> Optional[int]:
    # Execute the untrusted program inside a temp dir with destructive OS
    # functions disabled, appending the outcome to the shared ``result`` list.
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil
        __UpperCAmelCase =shutil.rmtree
        __UpperCAmelCase =os.rmdir
        __UpperCAmelCase =os.chdir
        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()
        # Run program.
        try:
            __UpperCAmelCase ={}
            with swallow_io():
                with time_limit(snake_case__ ):
                    exec(snake_case__ , snake_case__ )
            result.append('''passed''' )
        except TimeoutException:
            result.append('''timed out''' )
        except BaseException as e:
            result.append(f"""failed: {e}""" )
        # Needed for cleaning up.
        __UpperCAmelCase =rmtree
        __UpperCAmelCase =rmdir
        __UpperCAmelCase =chdir
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE ( snake_case__ ) -> Tuple:
    """Context manager that aborts the enclosed block with TimeoutException
    after ``snake_case__`` seconds, via SIGALRM (Unix, main thread only).

    Fixes vs. original: the inner handler declared two identical parameter
    names (a SyntaxError), and ``signal.signal`` was passed the timeout value
    instead of the handler, so the alarm would never raise.
    """

    def signal_handler(signum, frame):
        raise TimeoutException('''Timed out!''' )

    signal.setitimer(signal.ITIMER_REAL , snake_case__ )
    signal.signal(signal.SIGALRM , signal_handler )
    try:
        yield
    finally:
        # Always cancel the pending alarm, even if the body raised.
        signal.setitimer(signal.ITIMER_REAL , 0 )
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE ( ) -> List[Any]:
    """Silence stdout and stderr (into a write-only buffer) and block stdin
    for the duration of the context."""
    sink = WriteOnlyStringIO()
    with contextlib.redirect_stdout(sink), contextlib.redirect_stderr(sink), redirect_stdin(sink):
        yield
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE ( ) -> Dict:
    """Create a temporary directory, chdir into it for the duration of the
    context, and yield its path; the directory is removed on exit.

    Fix: the original passed an undefined name to ``chdir``; the freshly
    created ``dirname`` is the intended target.
    """
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname
# Exception raised by the time-limit context manager when the alarm fires.
# NOTE(review): the base ``_lowerCAmelCase`` is a mangled name not defined in
# this file — upstream this is ``class TimeoutException(Exception)``.
class _SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
    pass
# StringIO that only accepts writes: every read raises OSError and
# ``readable`` reports False, so sandboxed code cannot read back its output.
# NOTE(review): all four overrides share the mangled name ``A__``, so only
# the last one survives on the class — the read-blocking behavior is lost.
# Upstream these are ``read``, ``readline``, ``readlines`` and ``readable``.
class _SCREAMING_SNAKE_CASE ( io.StringIO ):
    def A__ (self , *UpperCAmelCase , **UpperCAmelCase):
        '''Reading is forbidden on this write-only stream.'''
        raise OSError
    def A__ (self , *UpperCAmelCase , **UpperCAmelCase):
        '''Reading is forbidden on this write-only stream.'''
        raise OSError
    def A__ (self , *UpperCAmelCase , **UpperCAmelCase):
        '''Reading is forbidden on this write-only stream.'''
        raise OSError
    def A__ (self , *UpperCAmelCase , **UpperCAmelCase):
        '''Report the stream as not readable.'''
        return False
class _SCREAMING_SNAKE_CASE ( contextlib._RedirectStream ): # type: ignore
a_ : Dict = '''stdin'''
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE ( snake_case__ ) -> List[str]:
    """Chdir into ``snake_case__`` for the duration of the context and
    restore the previous working directory on exit ("." is a no-op).

    Fixes vs. original: the fast path compared an undefined name ``root``
    instead of the parameter, and the ``finally`` block re-entered the target
    directory instead of restoring the saved one (the saved cwd was bound to
    a never-read mangled local).
    """
    if snake_case__ == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(snake_case__ )
    try:
        yield
    except BaseException as exc:
        # Re-raise unchanged; restoration happens in ``finally``.
        raise exc
    finally:
        os.chdir(cwd )
# Sandbox hardening for running untrusted code (upstream: reliability_guard).
# NOTE(review): heavily damaged by mangling — the body reads
# ``maximum_memory_bytes`` while the parameter is ``snake_case__``, and every
# destructive disable (upstream: ``builtins.exit = None``, ``os.kill = None``,
# ``shutil.rmtree = None``, ``subprocess.Popen = None``, ...) collapsed into
# repeated assignments to one throwaway local, so NOTHING is actually
# disabled. Restore from the upstream human-eval ``execution.py`` before use.
def SCREAMING_SNAKE_CASE ( snake_case__=None ) -> Tuple:
    if maximum_memory_bytes is not None:
        import resource
        # Cap address space, data segment and (non-macOS) stack size.
        resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
        resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
    faulthandler.disable()
    import builtins
    __UpperCAmelCase =None
    __UpperCAmelCase =None
    import os
    __UpperCAmelCase ='''1'''
    __UpperCAmelCase =None
    __UpperCAmelCase =None
    __UpperCAmelCase =None
    __UpperCAmelCase =None
    __UpperCAmelCase =None
    __UpperCAmelCase =None
    __UpperCAmelCase =None
    __UpperCAmelCase =None
    __UpperCAmelCase =None
    __UpperCAmelCase =None
    __UpperCAmelCase =None
    __UpperCAmelCase =None
    __UpperCAmelCase =None
    __UpperCAmelCase =None
    __UpperCAmelCase =None
    __UpperCAmelCase =None
    __UpperCAmelCase =None
    __UpperCAmelCase =None
    __UpperCAmelCase =None
    __UpperCAmelCase =None
    __UpperCAmelCase =None
    __UpperCAmelCase =None
    __UpperCAmelCase =None
    __UpperCAmelCase =None
    __UpperCAmelCase =None
    __UpperCAmelCase =None
    __UpperCAmelCase =None
    import shutil
    __UpperCAmelCase =None
    __UpperCAmelCase =None
    __UpperCAmelCase =None
    import subprocess
    __UpperCAmelCase =None  # type: ignore
    __UpperCAmelCase =None
    import sys
    __UpperCAmelCase =None
    __UpperCAmelCase =None
    __UpperCAmelCase =None
    __UpperCAmelCase =None
    __UpperCAmelCase =None
| 132 | 1 |
def a ( graph , next_ver , curr_ind , path ):
    """Return True when ``next_ver`` can extend ``path`` at position
    ``curr_ind``: it must be adjacent (in the adjacency matrix ``graph``)
    to the previously placed vertex and not already used in ``path``.

    Fix: the mangled original declared four identical parameter names (a
    SyntaxError) while the body read graph/next_ver/curr_ind/path; the
    distinct names are restored from the body.
    """
    # 1. Validate that current and next vertices are adjacent.
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path.
    return not any(vertex == next_ver for vertex in path )
def a ( graph , path , curr_ind ):
    """Backtracking helper: try to place vertices ``curr_ind`` onwards so
    that ``path`` becomes a Hamiltonian cycle of ``graph`` (adjacency
    matrix). Mutates ``path`` in place; returns True on success.

    Fix: the mangled original declared three identical parameter names (a
    SyntaxError) and its calls to the (also-mangled) validity helper could
    not resolve, so the adjacency/uniqueness check is inlined here.
    """
    # Base case: every vertex placed; the cycle closes iff the last placed
    # vertex connects back to the starting vertex.
    if curr_ind == len(graph ):
        return graph[path[curr_ind - 1]][path[0]] == 1
    # Recursive step: try each vertex as the next transition.
    for next_ver in range(0 , len(graph ) ):
        # Candidate must be adjacent to the previous vertex and unused.
        if graph[path[curr_ind - 1]][next_ver] != 0 and all(vertex != next_ver for vertex in path ):
            # Insert current vertex into path as next transition.
            path[curr_ind] = next_ver
            if a(graph , path , curr_ind + 1 ):
                return True
            # Backtrack.
            path[curr_ind] = -1
    return False
def a ( graph , start_index = 0 ):
    """Search for a Hamiltonian cycle in ``graph`` (adjacency matrix)
    starting and ending at ``start_index``; return the cycle as a vertex
    list, or ``[]`` when none exists.

    Fix: the mangled original declared two identical parameter names (a
    SyntaxError) and read the undefined ``start_index``/``graph``; the
    names are restored from the body. The backtracking helper is still
    referenced by its upstream name ``util_hamilton_cycle``, as in the
    original.
    """
    path = [-1] * (len(graph ) + 1)
    # Initialize start and end of path with the starting index.
    path[0] = path[-1] = start_index
    # Evaluate; if we find an answer return the path, otherwise an empty list.
    return path if util_hamilton_cycle(graph , path , 1 ) else []
| 707 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class lowercase__:
    """Helper that builds small ASTConfig / input tensors for the AST model
    tests and runs the basic forward-shape check.

    NOTE(review): mangling damage — both parameterized method signatures
    declare many parameters all named ``SCREAMING_SNAKE_CASE_`` (a
    SyntaxError in Python), and all four methods share the name
    ``_lowercase`` so only the last survives on the class. Upstream these
    are ``prepare_config_and_inputs`` / ``get_config`` /
    ``create_and_check_model`` / ``prepare_config_and_inputs_for_common``.
    """
    def __init__( self : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str=1_3 , SCREAMING_SNAKE_CASE_ : List[str]=2 , SCREAMING_SNAKE_CASE_ : int=2_4 , SCREAMING_SNAKE_CASE_ : Optional[int]=1_6 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=True , SCREAMING_SNAKE_CASE_ : Tuple=True , SCREAMING_SNAKE_CASE_ : List[str]=3_2 , SCREAMING_SNAKE_CASE_ : str=5 , SCREAMING_SNAKE_CASE_ : Any=4 , SCREAMING_SNAKE_CASE_ : Optional[int]=3_7 , SCREAMING_SNAKE_CASE_ : Optional[int]="gelu" , SCREAMING_SNAKE_CASE_ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE_ : Optional[int]=1_0 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.02 , SCREAMING_SNAKE_CASE_ : Any=None , SCREAMING_SNAKE_CASE_ : List[Any]=2 , SCREAMING_SNAKE_CASE_ : Optional[Any]=2 , ) -> Union[str, Any]:
        '''Store the tester hyper-parameters and derive the expected sequence length.'''
        lowercase_ = parent
        lowercase_ = batch_size
        lowercase_ = patch_size
        lowercase_ = max_length
        lowercase_ = num_mel_bins
        lowercase_ = is_training
        lowercase_ = use_labels
        lowercase_ = hidden_size
        lowercase_ = num_hidden_layers
        lowercase_ = num_attention_heads
        lowercase_ = intermediate_size
        lowercase_ = hidden_act
        lowercase_ = hidden_dropout_prob
        lowercase_ = attention_probs_dropout_prob
        lowercase_ = type_sequence_label_size
        lowercase_ = initializer_range
        lowercase_ = scope
        lowercase_ = frequency_stride
        lowercase_ = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        lowercase_ = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        lowercase_ = (self.max_length - self.patch_size) // self.time_stride + 1
        lowercase_ = frequency_out_dimension * time_out_dimension
        lowercase_ = num_patches + 2
    def _lowercase ( self : Optional[Any] ) -> List[Any]:
        '''Build random input values (and optional labels) plus a config.'''
        lowercase_ = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
        lowercase_ = None
        if self.use_labels:
            lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        lowercase_ = self.get_config()
        return config, input_values, labels
    def _lowercase ( self : List[Any] ) -> Any:
        '''Return an ASTConfig built from the stored hyper-parameters.'''
        return ASTConfig(
            patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
    def _lowercase ( self : Any , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> str:
        '''Run ASTModel in eval mode and check the last_hidden_state shape.'''
        lowercase_ = ASTModel(config=SCREAMING_SNAKE_CASE_ )
        model.to(SCREAMING_SNAKE_CASE_ )
        model.eval()
        lowercase_ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def _lowercase ( self : Optional[int] ) -> Dict:
        '''Return (config, inputs_dict) in the shape the common tests expect.'''
        lowercase_ = self.prepare_config_and_inputs()
        (
            (
                lowercase_
            ) , (
                lowercase_
            ) , (
                lowercase_
            ) ,
        ) = config_and_inputs
        lowercase_ = {'''input_values''': input_values}
        return config, inputs_dict
@require_torch
class lowercase__( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
    """Model-tester suite for AST (Audio Spectrogram Transformer): runs the
    common config/model tests plus AST-specific signature and embedding
    checks.

    NOTE(review): mangled — several methods share the name ``_lowercase``
    (only the last survives), one signature declares five identical
    ``SCREAMING_SNAKE_CASE_`` parameters (a SyntaxError), and the mixin
    bases are both written as the undefined ``UpperCAmelCase``.
    """
    a :Optional[int] = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    a :List[Any] = (
        {'''audio-classification''': ASTForAudioClassification, '''feature-extraction''': ASTModel}
        if is_torch_available()
        else {}
    )
    a :Optional[Any] = False
    a :str = False
    a :Optional[int] = False
    a :int = False
    def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Tuple ) -> Union[str, Any]:
        '''Skip the audio-classification pipeline test case for this model.'''
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False
    def _lowercase ( self : Any ) -> Union[str, Any]:
        '''Set up the model tester and the ConfigTester (AST has no text modality).'''
        lowercase_ = ASTModelTester(self )
        lowercase_ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=3_7 )
    def _lowercase ( self : List[str] ) -> Dict:
        '''Run the shared config sanity checks.'''
        self.config_tester.run_common_tests()
    @unittest.skip(reason='''AST does not use inputs_embeds''' )
    def _lowercase ( self : Tuple ) -> str:
        '''Intentionally skipped: AST consumes spectrograms, not input embeddings.'''
        pass
    def _lowercase ( self : List[str] ) -> Union[str, Any]:
        '''Input embeddings exist as an nn.Module; output embeddings are absent or Linear.'''
        lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase_ = model_class(SCREAMING_SNAKE_CASE_ )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            lowercase_ = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) )
    def _lowercase ( self : int ) -> List[str]:
        '''The forward signature's first argument must be ``input_values``.'''
        lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase_ = model_class(SCREAMING_SNAKE_CASE_ )
            lowercase_ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowercase_ = [*signature.parameters.keys()]
            lowercase_ = ['''input_values''']
            self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
    def _lowercase ( self : List[Any] ) -> Optional[Any]:
        '''Run the forward-shape model check.'''
        lowercase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
    @slow
    def _lowercase ( self : Union[str, Any] ) -> Dict:
        '''Load each pretrained AST checkpoint from the archive list.'''
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase_ = ASTModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
            self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def a ( ):
    """Fetch the sample FLAC used by the slow integration test from the hub
    and load it with torchaudio, returning ``(waveform, sampling_rate)``."""
    audio_path = hf_hub_download(
        repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' )
    waveform, sampling_rate = torchaudio.load(audio_path )
    return waveform, sampling_rate
@require_torch
@require_torchaudio
class lowercase__( unittest.TestCase ):
    """Slow integration test: run the finetuned AudioSet AST checkpoint on a
    real audio clip and compare the first logits against recorded values."""
    @cached_property
    def _lowercase ( self : Tuple ) -> Any:
        '''Lazily build the matching feature extractor (None if torchaudio is unavailable).'''
        return (
            ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' )
            if is_torchaudio_available()
            else None
        )
    @slow
    def _lowercase ( self : List[str] ) -> Any:
        '''End-to-end: extract features, forward pass, verify logit shape and first values.'''
        lowercase_ = self.default_feature_extractor
        lowercase_ = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(SCREAMING_SNAKE_CASE_ )
        lowercase_ = self.default_feature_extractor
        lowercase_ , lowercase_ = prepare_audio()
        lowercase_ = audio.squeeze().numpy()
        lowercase_ = feature_extractor(SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ )
        # forward pass
        with torch.no_grad():
            lowercase_ = model(**SCREAMING_SNAKE_CASE_ )
        # verify the logits
        lowercase_ = torch.Size((1, 5_2_7) )
        self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
        lowercase_ = torch.tensor([-0.87_60, -7.00_42, -8.66_02] ).to(SCREAMING_SNAKE_CASE_ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
| 409 | 0 |
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)
# Import the real UnCLIP pipelines only when torch and a recent-enough
# transformers (>= 4.25.0) are installed; otherwise fall back to the dummy
# placeholder objects so `import` of this package never fails.
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class lowerCamelCase ( PretrainedConfig ):
    """Configuration class for MGP-STR (scene-text recognition) models.

    Fixes vs. original: every ``__init__`` parameter was mangled to the same
    name ``lowercase`` (a SyntaxError) and the assignments dropped ``self.``,
    so the config stored nothing; parameter names are reconstructed from the
    right-hand sides of the body. The base class was the undefined
    ``__lowerCamelCase``; ``PretrainedConfig`` is the only config base
    imported by this module.
    NOTE(review): default-to-name mapping (``qkv_bias=True``,
    ``distilled=False``) follows the upstream ``MgpstrConfig`` — confirm.
    """
    UpperCamelCase_ : Union[str, Any] = 'mgp-str'
    # PretrainedConfig identifies configs via ``model_type``.
    model_type = 'mgp-str'
    def __init__(
        self,
        image_size=[3_2, 1_2_8],
        patch_size=4,
        num_channels=3,
        max_token_length=2_7,
        num_character_labels=3_8,
        num_bpe_labels=5_0_2_5_7,
        num_wordpiece_labels=3_0_5_2_2,
        hidden_size=7_6_8,
        num_hidden_layers=1_2,
        num_attention_heads=1_2,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_aa_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_aa_attentions = output_aa_attentions
        self.initializer_range = initializer_range
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def UpperCamelCase_ ( model_card_dir , src_lang , tgt_lang , model_name ) -> None:
    """Generate and write the model card (README.md) for one allenai WMT16
    FSMT checkpoint.

    Fixes vs. original: all four parameters were mangled to the same name
    ``lowerCamelCase`` (a SyntaxError) and every local was collapsed to
    ``__magic_name__``; parameter names are reconstructed from the call site
    (``write_model_card(model_card_dir, src_lang=..., tgt_lang=...,
    model_name=...)``) and local names from the f-string interpolations
    (``texts``, ``scores``, ``pair``). The card template itself is
    reproduced byte-for-byte.

    Args:
        model_card_dir: directory (``pathlib.Path``) to create and write into.
        src_lang / tgt_lang: language codes used in the card front matter.
        model_name: checkpoint name; must be a key of the BLEU score table.
    """
    # Sample sentences shown in the usage snippet of the card.
    texts = {
        '''en''': '''Machine learning is great, isn\'t it?''',
        '''ru''': '''Машинное обучение - это здорово, не так ли?''',
        '''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
    }
    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        '''wmt16-en-de-dist-12-1''': [28.3, 27.52],
        '''wmt16-en-de-dist-6-1''': [27.4, 27.11],
        '''wmt16-en-de-12-1''': [26.9, 25.75],
    }
    pair = f"""{src_lang}-{tgt_lang}"""
    model_card = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"allenai/{model_name}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
    title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
    author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
    year={{2020}},
    eprint={{2006.10369}},
    archivePrefix={{arXiv}},
    primaryClass={{cs.CL}}
}}
```
"""
    model_card_dir.mkdir(parents=True , exist_ok=True )
    path = os.path.join(model_card_dir , '''README.md''' )
    print(f"""Generating {path}""" )
    with open(path , '''w''' , encoding='''utf-8''' ) as f:
        f.write(model_card )
# make sure we are under the root of the project
# make sure we are under the root of the project
# Fixes vs. original: every module-level local was mangled to ``A`` while
# later lines read ``repo_dir`` / ``model_cards_dir`` / ``model_card_dir``,
# and the loop called the nonexistent ``write_model_card`` — the only
# matching function in this module is the (mangled) ``UpperCamelCase_``.
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / """model_cards"""
for model_name in ["""wmt16-en-de-dist-12-1""", """wmt16-en-de-dist-6-1""", """wmt16-en-de-12-1"""]:
    model_card_dir = model_cards_dir / """allenai""" / model_name
    UpperCamelCase_(model_card_dir, src_lang="""en""", tgt_lang="""de""", model_name=model_name)
| 147 |
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
A = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class _UpperCamelCase ( unittest.TestCase ):
    """Doctest harness: walks source/doc directories and runs doctests on the
    files matching an identifier filter.

    NOTE(review): mangling damage — the helper's parameters all share the
    name ``snake_case`` (a SyntaxError) while the body reads ``identifier``,
    ``n_identifier``, ``ignore_files``, ``only_modules`` etc., and every
    method shares the name ``_UpperCAmelCase`` so only the last survives
    (the others call ``self.analyze_directory``, which is therefore
    undefined). Restore the upstream names before running.
    """
    def _UpperCAmelCase ( self : Dict , snake_case : Path , snake_case : Union[str, None] = None , snake_case : Union[List[str], None] = None , snake_case : Union[str, List[str], None] = None , snake_case : bool = True , ) -> int:
        '''Collect files in a directory (filtered by identifier / n_identifier /
        ignore_files) and run their doctests, either as imported modules or as
        doctest text files.'''
        __magic_name__ : List[str] = [file for file in os.listdir(snake_case ) if os.path.isfile(os.path.join(snake_case , snake_case ) )]
        if identifier is not None:
            __magic_name__ : Tuple = [file for file in files if identifier in file]
        if n_identifier is not None:
            # n_identifier may be a single exclusion string or a list of them.
            if isinstance(snake_case , snake_case ):
                for n_ in n_identifier:
                    __magic_name__ : int = [file for file in files if n_ not in file]
            else:
                __magic_name__ : Tuple = [file for file in files if n_identifier not in file]
        __magic_name__ : Tuple = ignore_files or []
        ignore_files.append('''__init__.py''' )
        __magic_name__ : Any = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print('''Testing''' , snake_case )
            if only_modules:
                __magic_name__ : List[Any] = file.split('''.''' )[0]
                try:
                    __magic_name__ : Dict = getattr(snake_case , snake_case )
                    __magic_name__ : List[str] = doctest.DocTestSuite(snake_case )
                    __magic_name__ : Dict = unittest.TextTestRunner().run(snake_case )
                    self.assertIs(len(result.failures ) , 0 )
                except AttributeError:
                    logger.info(f"""{module_identifier} is not a module.""" )
            else:
                # Path.__rtruediv__ makes '..' / directory valid when directory is a Path.
                __magic_name__ : Tuple = doctest.testfile(str('''..''' / directory / file ) , optionflags=doctest.ELLIPSIS )
                self.assertIs(result.failed , 0 )
    def _UpperCAmelCase ( self : str ) -> Optional[int]:
        '''Doctest all modeling files except the ctrl ones.'''
        __magic_name__ : int = Path('''src/transformers''' )
        __magic_name__ : str = '''modeling'''
        __magic_name__ : str = [
            '''modeling_ctrl.py''',
            '''modeling_tf_ctrl.py''',
        ]
        self.analyze_directory(snake_case , identifier=snake_case , ignore_files=snake_case )
    def _UpperCAmelCase ( self : Any ) -> Optional[Any]:
        '''Doctest all tokenization files.'''
        __magic_name__ : Dict = Path('''src/transformers''' )
        __magic_name__ : Union[str, Any] = '''tokenization'''
        self.analyze_directory(snake_case , identifier=snake_case )
    def _UpperCAmelCase ( self : Any ) -> Dict:
        '''Doctest all configuration files.'''
        __magic_name__ : Any = Path('''src/transformers''' )
        __magic_name__ : int = '''configuration'''
        self.analyze_directory(snake_case , identifier=snake_case )
    def _UpperCAmelCase ( self : str ) -> List[str]:
        '''Doctest everything EXCEPT configuration/modeling/tokenization files.'''
        __magic_name__ : List[str] = Path('''src/transformers''' )
        __magic_name__ : str = ['''configuration''', '''modeling''', '''tokenization''']
        self.analyze_directory(snake_case , n_identifier=snake_case )
    def _UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
        '''Doctest the documentation sources as text files.'''
        __magic_name__ : Any = Path('''docs/source''' )
        __magic_name__ : str = ['''favicon.ico''']
        self.analyze_directory(snake_case , ignore_files=snake_case , only_modules=snake_case )
| 147 | 1 |
'''simple docstring'''
from ... import PretrainedConfig
# Canonical checkpoint name -> hosted config URL for Nezha.
# (Annotation fixed from `int` to `dict` — the value is a mapping.)
__snake_case : dict = {
    '''sijunhe/nezha-cn-base''': '''https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json''',
}
class lowercase_(PretrainedConfig):
    """Configuration class for the Nezha model.

    Stores the hyper-parameters used to instantiate a Nezha model and inherits
    the generic serialization / ``from_pretrained`` machinery from
    ``PretrainedConfig``.

    NOTE(review): the obfuscated original inherited from the undefined name
    ``__lowerCamelCase``, declared every ``__init__`` parameter as
    ``UpperCamelCase__`` (a SyntaxError), and referenced the undefined
    ``NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP`` (the map is bound to
    ``__snake_case`` at module level, which cannot be read from a class body
    due to name mangling). The archive-map attribute is dropped; everything
    else is restored with canonical names.
    """

    # Registry key used by the Auto* machinery to resolve this config.
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        """Build the config; unknown keyword arguments are forwarded to the base class."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
| 660 |
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
# Canonical checkpoint -> hosted config URL for MaskFormer.
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
# NOTE(review): the obfuscated original bound BOTH this map and the logger to
# the single name `lowerCamelCase_`, so the map was immediately shadowed and
# the config class's reads of `logger` raised NameError.
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
}

# Module-level logger used by the config class below for soft warnings.
logger = logging.get_logger(__name__)
class __a(PretrainedConfig):
    """Configuration class for MaskFormer.

    Holds the backbone config, the transformer-decoder config and the
    Hungarian-matcher / loss hyper-parameters.

    NOTE(review): the obfuscated original inherited from the undefined name
    ``__lowerCamelCase`` and named both of its extra methods ``__A`` (so the
    classmethod constructor was shadowed by ``to_dict``); canonical names are
    restored here, the visible logic is unchanged.
    """

    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    # Backbone / decoder families this config knows how to validate.
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        """Build the config; dict sub-configs are resolved through CONFIG_MAPPING."""
        if backbone_config is None:
            # Fall back to the Swin-base backbone used by
            # microsoft/swin-base-patch4-window12-384-in22k.
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # Verify that the backbone is supported (warn only — it may still work).
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )
        if decoder_config is None:
            # Fall back to the default DETR decoder (facebook/detr-resnet-50).
            decoder_config = DetrConfig()
        else:
            # Unsupported decoders are a hard error, unlike the backbone above.
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)
        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # Main feature dimensions for the model.
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # Initializer scales.
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss weights.
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        # Mirror decoder geometry so generic code can read it off this config.
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        """Alternate constructor from already-instantiated sub-configs."""
        return cls(backbone_config=backbone_config, decoder_config=decoder_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        """Serialize to a plain dict, recursing into the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 151 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
# Make CUDA/cuDNN kernels deterministic so the pixel-level checks below are reproducible.
enable_full_determinism()
def check_same_shape(tensor_list):
    """Return True when every tensor in ``tensor_list`` shares the same ``.shape``.

    An empty list vacuously returns True (the first shape is never evaluated
    because the generator over ``shapes[1:]`` is empty).

    NOTE(review): the obfuscated original was named ``__magic_name__`` with a
    parameter ``lowerCamelCase`` while its body read the undefined names
    ``tensor_list``/``shapes``; the test class below calls it as
    ``check_same_shape``, so that name and the intended body are restored.
    """
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
class a__ ( __A , __A , __A , unittest.TestCase ):
    """simple docstring"""

    # NOTE(review): this block is name-mangled. The base list repeats the
    # undefined name `__A` (imports suggest the pipeline tester mixins), every
    # method is named `_snake_case` (so only the last def survives), and each
    # method assigns only to `__lowerCAmelCase` while later lines read other
    # names (`image`, `model`, `pipe`, ...). Restore distinct names before use.
    __UpperCamelCase : Optional[Any] = StableDiffusionLatentUpscalePipeline
    __UpperCamelCase : Dict = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        'height',
        'width',
        'cross_attention_kwargs',
        'negative_prompt_embeds',
        'prompt_embeds',
    }
    __UpperCamelCase : str = PipelineTesterMixin.required_optional_params - {'num_images_per_prompt'}
    __UpperCamelCase : Tuple = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    __UpperCamelCase : List[Any] = frozenset(
        [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    __UpperCamelCase : Tuple = frozenset([] )
    __UpperCamelCase : str = True
    # Presumably the `dummy_image` property: a fixed 1x4x16x16 random tensor.
    @property
    def _snake_case (self ):
        __lowerCAmelCase = 1
        __lowerCAmelCase = 4
        __lowerCAmelCase = (16, 16)
        __lowerCAmelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__lowercase )
        return image
    # Presumably `get_dummy_components`: builds tiny unet/vae/scheduler/text-encoder.
    def _snake_case (self ):
        torch.manual_seed(0 )
        __lowerCAmelCase = UNetaDConditionModel(
            act_fn='''gelu''' , attention_head_dim=8 , norm_num_groups=__lowercase , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=1_60 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
            '''KDownBlock2D''',
            '''KCrossAttnDownBlock2D''',
            '''KCrossAttnDownBlock2D''',
            '''KCrossAttnDownBlock2D''',
        ) , in_channels=8 , mid_block_type=__lowercase , only_cross_attention=__lowercase , out_channels=5 , resnet_time_scale_shift='''scale_shift''' , time_embedding_type='''fourier''' , timestep_post_act='''gelu''' , up_block_types=('''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KUpBlock2D''') , )
        __lowerCAmelCase = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
            '''DownEncoderBlock2D''',
            '''DownEncoderBlock2D''',
            '''DownEncoderBlock2D''',
            '''DownEncoderBlock2D''',
        ] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        __lowerCAmelCase = EulerDiscreteScheduler(prediction_type='''sample''' )
        __lowerCAmelCase = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''quick_gelu''' , projection_dim=5_12 , )
        __lowerCAmelCase = CLIPTextModel(__lowercase )
        __lowerCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        __lowerCAmelCase = {
            '''unet''': model.eval(),
            '''vae''': vae.eval(),
            '''scheduler''': scheduler,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
        }
        return components
    # Presumably `get_dummy_inputs(device, seed=0)`.
    def _snake_case (self , __lowercase , __lowercase=0 ):
        if str(__lowercase ).startswith('''mps''' ):
            __lowerCAmelCase = torch.manual_seed(__lowercase )
        else:
            __lowerCAmelCase = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
        __lowerCAmelCase = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': self.dummy_image.cpu(),
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs
    # CPU smoke test: runs the pipeline and compares a 3x3 corner slice.
    def _snake_case (self ):
        __lowerCAmelCase = '''cpu'''
        __lowerCAmelCase = self.get_dummy_components()
        __lowerCAmelCase = self.pipeline_class(**__lowercase )
        pipe.to(__lowercase )
        pipe.set_progress_bar_config(disable=__lowercase )
        __lowerCAmelCase = self.get_dummy_inputs(__lowercase )
        __lowerCAmelCase = pipe(**__lowercase ).images
        __lowerCAmelCase = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 2_56, 2_56, 3) )
        __lowerCAmelCase = np.array(
            [0.4_7_2_2_2_4_1_2, 0.4_1_9_2_1_6_3_3, 0.4_4_7_1_7_4_3_4, 0.4_6_8_7_4_1_9_2, 0.4_2_5_8_8_2_5_8, 0.4_6_1_5_0_7_2_6, 0.4_6_7_7_5_3_4, 0.4_5_5_8_3_8_3_2, 0.4_8_5_7_9_0_5_5] )
        __lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(__lowercase , 1e-3 )
    # Mixin overrides that loosen the numeric tolerances for this pipeline.
    def _snake_case (self ):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 )
    def _snake_case (self ):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 )
    def _snake_case (self ):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
    def _snake_case (self ):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3 )
    def _snake_case (self ):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 )
    def _snake_case (self ):
        super().test_save_load_local(expected_max_difference=3e-3 )
    def _snake_case (self ):
        super().test_save_load_optional_components(expected_max_difference=3e-3 )
    # Runs every Karras scheduler (minus the skip list) and checks that all
    # outputs share one shape via `check_same_shape` defined above.
    def _snake_case (self ):
        __lowerCAmelCase = [
            '''DDIMScheduler''',
            '''DDPMScheduler''',
            '''PNDMScheduler''',
            '''HeunDiscreteScheduler''',
            '''EulerAncestralDiscreteScheduler''',
            '''KDPM2DiscreteScheduler''',
            '''KDPM2AncestralDiscreteScheduler''',
            '''DPMSolverSDEScheduler''',
        ]
        __lowerCAmelCase = self.get_dummy_components()
        __lowerCAmelCase = self.pipeline_class(**__lowercase )
        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=__lowercase )
        pipe.to(__lowercase )
        pipe.set_progress_bar_config(disable=__lowercase )
        __lowerCAmelCase = self.get_dummy_inputs(__lowercase )
        __lowerCAmelCase = 2
        __lowerCAmelCase = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                # no schedulers
                continue
            __lowerCAmelCase = getattr(__lowercase , scheduler_enum.name )
            __lowerCAmelCase = scheduler_cls.from_config(pipe.scheduler.config )
            __lowerCAmelCase = pipe(**__lowercase )[0]
            outputs.append(__lowercase )
        assert check_same_shape(__lowercase )
@require_torch_gpu
@slow
class a__ ( unittest.TestCase ):
    """simple docstring"""

    # NOTE(review): mangled block — every method below is named `_snake_case`,
    # so only the LAST def survives; the bodies (a `super().tearDown()` call
    # and two upscaling checks) indicate the originals were `tearDown` plus two
    # `test_*` methods. Each body also assigns only to `__lowerCAmelCase` while
    # reading `pipe`/`upscaler`/`expected_image` — restore names before use.
    def _snake_case (self ):
        # Presumably tearDown: frees GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    # End-to-end: SD v1-4 emits latents, the x2 latent upscaler decodes them,
    # and the mean absolute error against a reference image must stay < 5e-2.
    def _snake_case (self ):
        __lowerCAmelCase = torch.manual_seed(33 )
        __lowerCAmelCase = StableDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' , torch_dtype=torch.floataa )
        pipe.to('''cuda''' )
        __lowerCAmelCase = StableDiffusionLatentUpscalePipeline.from_pretrained(
            '''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa )
        upscaler.to('''cuda''' )
        __lowerCAmelCase = '''a photo of an astronaut high resolution, unreal engine, ultra realistic'''
        __lowerCAmelCase = pipe(__lowercase , generator=__lowercase , output_type='''latent''' ).images
        __lowerCAmelCase = upscaler(
            prompt=__lowercase , image=__lowercase , num_inference_steps=20 , guidance_scale=0 , generator=__lowercase , output_type='''np''' , ).images[0]
        __lowerCAmelCase = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy''' )
        assert np.abs((expected_image - image).mean() ) < 5e-2
    # Image-to-image upscaling of a 512px input checked against a 1024px reference
    # (max absolute error this time).
    def _snake_case (self ):
        __lowerCAmelCase = torch.manual_seed(33 )
        __lowerCAmelCase = StableDiffusionLatentUpscalePipeline.from_pretrained(
            '''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa )
        upscaler.to('''cuda''' )
        __lowerCAmelCase = '''the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'''
        __lowerCAmelCase = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png''' )
        __lowerCAmelCase = upscaler(
            prompt=__lowercase , image=__lowercase , num_inference_steps=20 , guidance_scale=0 , generator=__lowercase , output_type='''np''' , ).images[0]
        __lowerCAmelCase = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy''' )
        assert np.abs((expected_image - image).max() ) < 5e-2
| 474 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def get_citation(base_url: str, params: dict) -> str:
    """Return the citation count of a paper by scraping a Google Scholar lookup page.

    Args:
        base_url: the scholar_lookup endpoint.
        params: query parameters identifying the paper (title, journal, ...).

    Returns:
        The text of the "Cited by N" anchor for the first search result.

    NOTE(review): the obfuscated original declared both parameters as
    ``lowerCamelCase`` (a SyntaxError) and was named ``__magic_name__`` while
    the ``__main__`` block called ``get_citation``; distinct names are
    restored. The top-of-file import reads ``from bsa import BeautifulSoup``
    — the package is actually ``bs4``; fix that import before running.
    """
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    # First result block, then its footer links; the third anchor is "Cited by N".
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 474 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _UpperCamelCase ( A , A , unittest.TestCase ):
    '''simple docstring'''

    # NOTE(review): mangled block. The base list repeats the undefined name `A`
    # twice (a duplicate-base TypeError even if `A` resolved; the imports
    # suggest PipelineLatentTesterMixin/PipelineTesterMixin), every method is
    # named `__lowerCamelCase` (only the last survives), one signature repeats
    # the parameter `_lowerCAmelCase` (a SyntaxError), and bodies assign only
    # to `__lowercase` while reading `unet`/`sd_pipe`/`inputs`/... Restore
    # distinct names before relying on this class.
    lowerCAmelCase__ = StableDiffusionXLImgaImgPipeline
    lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
    lowerCAmelCase__ = PipelineTesterMixin.required_optional_params - {"""latents"""}
    lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    lowerCAmelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
    lowerCAmelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
    # Presumably `get_dummy_components`: tiny SDXL unet/scheduler/vae/dual text encoders.
    def __lowerCamelCase ( self : Optional[int]):
        '''simple docstring'''
        torch.manual_seed(0)
        __lowercase =UNetaDConditionModel(
            block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , attention_head_dim=(2, 4) , use_linear_projection=_lowerCAmelCase , addition_embed_type='text_time' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=8_0 , cross_attention_dim=6_4 , )
        __lowercase =EulerDiscreteScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , steps_offset=1 , beta_schedule='scaled_linear' , timestep_spacing='leading' , )
        torch.manual_seed(0)
        __lowercase =AutoencoderKL(
            block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , )
        torch.manual_seed(0)
        __lowercase =CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=3_2 , )
        __lowercase =CLIPTextModel(_lowerCAmelCase)
        __lowercase =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=_lowerCAmelCase)
        __lowercase =CLIPTextModelWithProjection(_lowerCAmelCase)
        __lowercase =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=_lowerCAmelCase)
        __lowercase ={
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'text_encoder_2': text_encoder_a,
            'tokenizer_2': tokenizer_a,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    # Presumably `get_dummy_inputs(device, seed=0)` — note the duplicated
    # `_lowerCAmelCase` parameter below is a SyntaxError as written.
    def __lowerCamelCase ( self : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any=0):
        '''simple docstring'''
        __lowercase =floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_lowerCAmelCase)).to(_lowerCAmelCase)
        __lowercase =image / 2 + 0.5
        if str(_lowerCAmelCase).startswith('mps'):
            __lowercase =torch.manual_seed(_lowerCAmelCase)
        else:
            __lowercase =torch.Generator(device=_lowerCAmelCase).manual_seed(_lowerCAmelCase)
        __lowercase ={
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 5.0,
            'output_type': 'numpy',
            'strength': 0.75,
        }
        return inputs
    # CPU smoke test comparing a 3x3 corner slice against hard-coded values.
    def __lowerCamelCase ( self : List[str]):
        '''simple docstring'''
        __lowercase ='cpu'  # ensure determinism for the device-dependent torch.Generator
        __lowercase =self.get_dummy_components()
        __lowercase =StableDiffusionXLImgaImgPipeline(**_lowerCAmelCase)
        __lowercase =sd_pipe.to(_lowerCAmelCase)
        sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase)
        __lowercase =self.get_dummy_inputs(_lowerCAmelCase)
        __lowercase =sd_pipe(**_lowerCAmelCase).images
        __lowercase =image[0, -3:, -3:, -1]
        assert image.shape == (1, 3_2, 3_2, 3)
        __lowercase =np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def __lowerCamelCase ( self : Optional[Any]):
        '''simple docstring'''
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)
    def __lowerCamelCase ( self : List[Any]):
        '''simple docstring'''
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
    def __lowerCamelCase ( self : int):
        '''simple docstring'''
        pass
    # Checks that prompt-embedding inputs reproduce the plain-prompt output.
    def __lowerCamelCase ( self : List[str]):
        '''simple docstring'''
        __lowercase =self.get_dummy_components()
        __lowercase =StableDiffusionXLImgaImgPipeline(**_lowerCAmelCase)
        __lowercase =sd_pipe.to(_lowerCAmelCase)
        __lowercase =sd_pipe.to(_lowerCAmelCase)
        sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase)
        # forward without prompt embeds
        __lowercase =self.get_dummy_inputs(_lowerCAmelCase)
        __lowercase =3 * ['this is a negative prompt']
        __lowercase =negative_prompt
        __lowercase =3 * [inputs['prompt']]
        __lowercase =sd_pipe(**_lowerCAmelCase)
        __lowercase =output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        __lowercase =self.get_dummy_inputs(_lowerCAmelCase)
        __lowercase =3 * ['this is a negative prompt']
        __lowercase =3 * [inputs.pop('prompt')]
        (
            (
                __lowercase
            ) , (
                __lowercase
            ) , (
                __lowercase
            ) , (
                __lowercase
            ) ,
        ) =sd_pipe.encode_prompt(_lowerCAmelCase , negative_prompt=_lowerCAmelCase)
        __lowercase =sd_pipe(
            **_lowerCAmelCase , prompt_embeds=_lowerCAmelCase , negative_prompt_embeds=_lowerCAmelCase , pooled_prompt_embeds=_lowerCAmelCase , negative_pooled_prompt_embeds=_lowerCAmelCase , )
        __lowercase =output.images[0, -3:, -3:, -1]
        # make sure that it's equal
        assert np.abs(image_slice_a.flatten() - image_slice_a.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
    '''simple docstring'''

    # NOTE(review): mangled block — all three methods share the name
    # `__lowerCamelCase` (only the last survives); the bodies indicate they
    # were `tearDown`, `get_inputs`, and a `test_*` method. Bodies assign only
    # to `__lowercase` but read `latents`/`pipe`/`image_slice` — restore names.
    def __lowerCamelCase ( self : int):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    # Presumably `get_inputs(device="cpu", dtype=..., seed=0)`: fixed-noise latents.
    def __lowerCamelCase ( self : List[str] , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple="cpu" , _lowerCAmelCase : Any=torch.floataa , _lowerCAmelCase : List[Any]=0):
        '''simple docstring'''
        __lowercase =torch.Generator(device=_lowerCAmelCase).manual_seed(_lowerCAmelCase)
        __lowercase =np.random.RandomState(_lowerCAmelCase).standard_normal((1, 4, 6_4, 6_4))
        __lowercase =torch.from_numpy(_lowerCAmelCase).to(device=_lowerCAmelCase , dtype=_lowerCAmelCase)
        __lowercase ={
            'prompt': 'a photograph of an astronaut riding a horse',
            'latents': latents,
            'generator': generator,
            'num_inference_steps': 3,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs
    # GPU integration check of SD 2-base against a hard-coded corner slice.
    def __lowerCamelCase ( self : Tuple):
        '''simple docstring'''
        __lowercase =DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base')
        pipe.to(_lowerCAmelCase)
        pipe.set_progress_bar_config(disable=_lowerCAmelCase)
        __lowercase =self.get_inputs(_lowerCAmelCase)
        __lowercase =pipe(**_lowerCAmelCase).images
        __lowercase =image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        __lowercase =np.array([0.4_9493, 0.4_7896, 0.4_0798, 0.5_4214, 0.5_3212, 0.4_8202, 0.4_7656, 0.4_6329, 0.4_8506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
| 474 |
'''simple docstring'''
def solution(length: int = 50) -> int:
    """Project Euler 116-style count: arrangements of a row of ``length`` black
    squares where some squares are replaced by tiles of a single colour — red
    (length 2), green (length 3) or blue (length 4) — using at least one tile.

    ``ways[n][t - 2]`` counts arrangements of a row of length ``n`` using at
    least one tile of length ``t``; the answer is the sum over the three
    colours for the full row.

    NOTE(review): the obfuscated original took a parameter named
    ``_lowerCAmelCase`` while its body read the undefined name ``length``, and
    the ``__main__`` block called the undefined name ``solution``; both are fixed.

    Args:
        length: row length (defaults to the Project Euler value, 50).

    Returns:
        Total number of tilings with at least one tile.
    """
    ways = [[0] * 3 for _ in range(length + 1)]
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                # Place the FIRST tile at `tile_start`; the rest of the row is
                # either all black (+1) or itself contains at least one tile
                # (the recursive term).
                ways[row_length][tile_length - 2] += (
                    ways[row_length - tile_start - tile_length][tile_length - 2] + 1
                )
    return sum(ways[length])


if __name__ == "__main__":
    print(f"{solution() = }")
| 474 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
# Lazy-import structure: submodule name -> public symbols it provides.
_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
    import sys

    # Replace this module object with a _LazyModule so the heavy tokenizer
    # import only happens on first attribute access.
    # NOTE(review): the obfuscated original named the structure dict
    # `UpperCamelCase__` (leaving `_import_structure` undefined) and bound the
    # lazy module to a throwaway name instead of installing it in
    # `sys.modules`, which disabled lazy loading entirely.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 711 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger.
# NOTE(review): the obfuscated original bound both the logger and the archive
# map below to the single name `UpperCamelCase__`, so the logger was
# immediately shadowed by the dict.
logger = logging.get_logger(__name__)

# Canonical checkpoint -> hosted config URL for GPT-NeoX Japanese.
GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class _UpperCAmelCase(PretrainedConfig):
    """Configuration class for the GPT-NeoX Japanese model.

    NOTE(review): the obfuscated original inherited from the undefined name
    ``snake_case``, declared every ``__init__`` parameter as ``a`` (a
    SyntaxError), and used the name-mangled class attribute
    ``__lowerCamelCase`` for the model type; canonical names are restored.
    """

    # Registry key used by the Auto* machinery to resolve this config.
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        """Build the config; unknown keyword arguments go to the base class."""
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # The intermediate (MLP) width is hidden_size * this multiplier.
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
| 640 | 0 |
'''simple docstring'''
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    """Return a random password of ``length`` characters drawn from letters,
    digits and punctuation, using a cryptographically secure RNG."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    """Return a length-``i`` password guaranteed to contain every character of
    ``chars_incl``; the remaining budget is split roughly evenly between
    letters, digits and punctuation, then shuffled.

    NOTE(review): in the obfuscated original every function in this module was
    named ``__UpperCamelCase`` (each def shadowed the previous one) and the
    bodies referenced parameter names that did not exist; ``main`` called the
    undefined ``password_generator``/``alternative_password_generator``. The
    canonical names are restored throughout.
    """
    # Budget left after the mandatory characters.
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    """Return ``i`` characters chosen securely from ``chars_incl``."""
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    """Return True when ``password`` is at least ``min_length`` characters and
    contains UPPERCASE, lowercase, a digit, and a special character."""
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    return upper and lower and num and spec_char


def main():
    """Interactive entry point: prompt for constraints and print two passwords."""
    max_length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(max_length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, max_length),
    )
    print("[If you are thinking of using this passsword, You better save it.]")


if __name__ == "__main__":
    main()
| 152 |
import pytest
import datasets
# Import fixture modules as plugins
a__: Dict = ['tests.fixtures.files', 'tests.fixtures.hub', 'tests.fixtures.fsspec']
def pytest_collection_modifyitems(config, items):
    """Mark collected tests as "unit" by default, unless already marked
    "integration" or "unit".

    NOTE(review): in the obfuscated original every hook/fixture in this block
    was named ``UpperCamelCase__`` with duplicate ``UpperCamelCase__``
    parameters (a SyntaxError), so pytest could never discover them; the
    canonical conftest names are restored throughout.
    """
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    """Register custom markers so `--strict-markers` runs do not error."""
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    """Redirect every `datasets` cache directory into the pytest temp tree so
    tests never touch (or pollute) the real user cache."""
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why a cache dir per test function does not work?
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    """Silence datasets progress bars for the whole test session."""
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # Don't take tests into account when counting downloads.
    # (The obfuscated original passed the monkeypatch object itself as the value.)
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 supported
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
| 190 | 0 |
"""simple docstring"""
def snake_case__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Union[str, Any]:
UpperCAmelCase__ = word.split()
def justify(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
UpperCAmelCase__ = max_width - width
UpperCAmelCase__ = len(SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) == 1:
# if there is only word in line
# just insert overall_spaces_count for the remainder of line
return line[0] + " " * overall_spaces_count
else:
UpperCAmelCase__ = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
UpperCAmelCase__ = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
UpperCAmelCase__ = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(SCREAMING_SNAKE_CASE__ ):
num_spaces_between_words_list[i] += 1
UpperCAmelCase__ = []
for i in range(SCREAMING_SNAKE_CASE__ ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * """ """ )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = []
UpperCAmelCase__ = []
UpperCAmelCase__ = 0
for word in words:
if width + len(SCREAMING_SNAKE_CASE__ ) + len(SCREAMING_SNAKE_CASE__ ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(SCREAMING_SNAKE_CASE__ )
width += len(SCREAMING_SNAKE_CASE__ )
else:
# justify the line and add it to result
answer.append(justify(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
# reset new line and new width
UpperCAmelCase__ = [word], len(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = max_width - width - len(SCREAMING_SNAKE_CASE__ )
answer.append(""" """.join(SCREAMING_SNAKE_CASE__ ) + (remaining_spaces + 1) * """ """ )
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 704 |
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class _UpperCamelCase ( __UpperCamelCase ):
'''simple docstring'''
def A__ ( self , __lowercase ):
with open(__lowercase , encoding="""utf-8""" ) as input_file:
UpperCAmelCase__ = re.compile(r"""(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)""" )
UpperCAmelCase__ = input_file.read()
UpperCAmelCase__ = regexp.search(__lowercase )
return match
def A__ ( self , __lowercase ):
with open(__lowercase , encoding="""utf-8""" ) as input_file:
UpperCAmelCase__ = re.compile(r"""#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()""" , re.DOTALL )
UpperCAmelCase__ = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
UpperCAmelCase__ = regexp.finditer(__lowercase )
UpperCAmelCase__ = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def A__ ( self ):
UpperCAmelCase__ = Path("""./datasets""" )
UpperCAmelCase__ = list(dataset_paths.absolute().glob("""**/*.py""" ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(__lowercase ) ):
raise AssertionError(F'''open(...) must use utf-8 encoding in {dataset}''' )
def A__ ( self ):
UpperCAmelCase__ = Path("""./datasets""" )
UpperCAmelCase__ = list(dataset_paths.absolute().glob("""**/*.py""" ) )
for dataset in dataset_files:
if self._no_print_statements(str(__lowercase ) ):
raise AssertionError(F'''print statement found in {dataset}. Use datasets.logger/logging instead.''' )
| 422 | 0 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class lowerCAmelCase_ :
"""simple docstring"""
def __a ( self : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any ):
'''simple docstring'''
return None
class lowerCAmelCase_ :
"""simple docstring"""
def __a ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
'''simple docstring'''
return None
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
a_ :Tuple =[
# (model_name, model_kwargs)
("""bert-base-cased""", {}),
("""gpt2""", {"""use_cache""": False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def __a ( self : Any ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(SCREAMING_SNAKE_CASE__ , """tf""" , 1_2 , **SCREAMING_SNAKE_CASE__ )
@require_torch
@slow
def __a ( self : Any ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(SCREAMING_SNAKE_CASE__ , """pt""" , 1_2 , **SCREAMING_SNAKE_CASE__ )
@require_torch
@slow
def __a ( self : Optional[Any] ):
'''simple docstring'''
from transformers import BertModel
__a = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""]
with NamedTemporaryFile(mode="""w+t""" ) as vocab_file:
vocab_file.write("""\n""".join(SCREAMING_SNAKE_CASE__ ) )
vocab_file.flush()
__a = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
__a = BertModel(BertConfig(vocab_size=len(SCREAMING_SNAKE_CASE__ ) ) )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
self._test_export(SCREAMING_SNAKE_CASE__ , """pt""" , 1_2 , SCREAMING_SNAKE_CASE__ )
@require_tf
@slow
def __a ( self : List[str] ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
__a = self._test_export(SCREAMING_SNAKE_CASE__ , """tf""" , 1_2 , **SCREAMING_SNAKE_CASE__ )
__a = quantize(Path(SCREAMING_SNAKE_CASE__ ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(SCREAMING_SNAKE_CASE__ ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
@require_torch
@slow
def __a ( self : int ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
__a = self._test_export(SCREAMING_SNAKE_CASE__ , """pt""" , 1_2 , **SCREAMING_SNAKE_CASE__ )
__a = quantize(SCREAMING_SNAKE_CASE__ )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(SCREAMING_SNAKE_CASE__ ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
def __a ( self : int , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int=None , **SCREAMING_SNAKE_CASE__ : Any ):
'''simple docstring'''
try:
# Compute path
with TemporaryDirectory() as tempdir:
__a = Path(SCREAMING_SNAKE_CASE__ ).joinpath("""model.onnx""" )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
return path
except Exception as e:
self.fail(SCREAMING_SNAKE_CASE__ )
@require_torch
@require_tokenizers
@slow
def __a ( self : str ):
'''simple docstring'''
from transformers import BertModel
__a = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
__a = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , """pt""" )
@require_tf
@require_tokenizers
@slow
def __a ( self : Tuple ):
'''simple docstring'''
from transformers import TFBertModel
__a = TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
__a = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , """tf""" )
def __a ( self : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
'''simple docstring'''
__a = FeatureExtractionPipeline(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__a = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""]
__a , __a , __a , __a = infer_shapes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Assert all variables are present
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , SCREAMING_SNAKE_CASE__ )
self.assertSequenceEqual(variable_names[3:] , SCREAMING_SNAKE_CASE__ )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: """batch""", 1: """sequence"""} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["""output_0"""] , {0: """batch""", 1: """sequence"""} )
self.assertDictEqual(shapes["""output_1"""] , {0: """batch"""} )
def __a ( self : Union[str, Any] ):
'''simple docstring'''
__a = ["""input_ids""", """attention_mask""", """token_type_ids"""]
__a = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]}
__a , __a = ensure_valid_input(FuncContiguousArgs() , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(SCREAMING_SNAKE_CASE__ ) , set(SCREAMING_SNAKE_CASE__ ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(SCREAMING_SNAKE_CASE__ , (tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
__a , __a = ensure_valid_input(FuncNonContiguousArgs() , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , 1 )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens["""input_ids"""] )
self.assertEqual(ordered_input_names[0] , """input_ids""" )
def __a ( self : List[Any] ):
'''simple docstring'''
__a = generate_identified_filename(Path("""/home/something/my_fake_model.onnx""" ) , """-test""" )
self.assertEqual("""/home/something/my_fake_model-test.onnx""" , generated.as_posix() )
| 582 |
'''simple docstring'''
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 3
class lowerCAmelCase_ ( snake_case__ ):
"""simple docstring"""
pass
def __lowercase ( __SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
for shard in shards:
for i in range(__SCREAMING_SNAKE_CASE ):
yield {"i": i, "shard": shard}
def __lowercase ( ) -> Tuple:
"""simple docstring"""
__a = int(os.environ["""RANK"""] )
__a = int(os.environ["""WORLD_SIZE"""] )
__a = ArgumentParser()
parser.add_argument("""--streaming""" , type=__SCREAMING_SNAKE_CASE )
parser.add_argument("""--local_rank""" , type=__SCREAMING_SNAKE_CASE )
parser.add_argument("""--num_workers""" , type=__SCREAMING_SNAKE_CASE , default=0 )
__a = parser.parse_args()
__a = args.streaming
__a = args.num_workers
__a = {"""shards""": [F'''shard_{shard_idx}''' for shard_idx in range(__SCREAMING_SNAKE_CASE )]}
__a = IterableDataset.from_generator(__SCREAMING_SNAKE_CASE , gen_kwargs=__SCREAMING_SNAKE_CASE )
if not streaming:
__a = Dataset.from_list(list(__SCREAMING_SNAKE_CASE ) )
__a = split_dataset_by_node(__SCREAMING_SNAKE_CASE , rank=__SCREAMING_SNAKE_CASE , world_size=__SCREAMING_SNAKE_CASE )
__a = torch.utils.data.DataLoader(__SCREAMING_SNAKE_CASE , num_workers=__SCREAMING_SNAKE_CASE )
__a = NUM_SHARDS * NUM_ITEMS_PER_SHARD
__a = full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
__a = sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(F'''local_size {local_size} != expected_local_size {expected_local_size}''' )
if __name__ == "__main__":
main()
| 582 | 1 |
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase__ ( __lowercase):
'''simple docstring'''
def _lowerCamelCase ( self :str ) -> List[str]:
__UpperCamelCase : Optional[int] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(a , "embed_dim" ) )
self.parent.assertTrue(hasattr(a , "num_heads" ) )
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self :List[Any] , a :Dict , a :Optional[int]=1_3 , a :Any=6_4 , a :List[str]=3 , a :int=[1_6, 4_8, 9_6] , a :str=[1, 3, 6] , a :Optional[Any]=[1, 2, 1_0] , a :Dict=[7, 3, 3] , a :List[Any]=[4, 2, 2] , a :Optional[Any]=[2, 1, 1] , a :Tuple=[2, 2, 2] , a :Union[str, Any]=[False, False, True] , a :int=[0.0, 0.0, 0.0] , a :str=0.02 , a :str=1E-1_2 , a :Optional[int]=True , a :List[str]=True , a :List[Any]=2 , ) -> Optional[Any]:
__UpperCamelCase : Any = parent
__UpperCamelCase : Optional[int] = batch_size
__UpperCamelCase : Optional[int] = image_size
__UpperCamelCase : Optional[int] = patch_sizes
__UpperCamelCase : Optional[Any] = patch_stride
__UpperCamelCase : List[str] = patch_padding
__UpperCamelCase : Any = is_training
__UpperCamelCase : Any = use_labels
__UpperCamelCase : int = num_labels
__UpperCamelCase : Tuple = num_channels
__UpperCamelCase : Dict = embed_dim
__UpperCamelCase : Dict = num_heads
__UpperCamelCase : Union[str, Any] = stride_kv
__UpperCamelCase : Tuple = depth
__UpperCamelCase : Optional[int] = cls_token
__UpperCamelCase : Tuple = attention_drop_rate
__UpperCamelCase : str = initializer_range
__UpperCamelCase : Any = layer_norm_eps
def _lowerCamelCase ( self :Optional[int] ) -> int:
__UpperCamelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCamelCase : str = None
if self.use_labels:
__UpperCamelCase : Dict = ids_tensor([self.batch_size] , self.num_labels )
__UpperCamelCase : List[str] = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self :Tuple ) -> Dict:
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self :str , a :List[Any] , a :Optional[Any] , a :List[Any] ) -> int:
__UpperCamelCase : List[str] = CvtModel(config=a )
model.to(a )
model.eval()
__UpperCamelCase : str = model(a )
__UpperCamelCase : List[str] = (self.image_size, self.image_size)
__UpperCamelCase : int = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
__UpperCamelCase : List[str] = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
__UpperCamelCase : Tuple = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def _lowerCamelCase ( self :str , a :Tuple , a :Optional[Any] , a :Optional[Any] ) -> Tuple:
__UpperCamelCase : Optional[Any] = self.num_labels
__UpperCamelCase : Optional[Any] = CvtForImageClassification(a )
model.to(a )
model.eval()
__UpperCamelCase : Any = model(a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self :str ) -> int:
__UpperCamelCase : str = self.prepare_config_and_inputs()
__UpperCamelCase : str = config_and_inputs
__UpperCamelCase : Dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase__ ( __lowercase , __lowercase , unittest.TestCase):
'''simple docstring'''
_A = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
_A = (
{'feature-extraction': CvtModel, 'image-classification': CvtForImageClassification}
if is_torch_available()
else {}
)
_A = False
_A = False
_A = False
_A = False
_A = False
def _lowerCamelCase ( self :Tuple ) -> Dict:
__UpperCamelCase : Optional[int] = CvtModelTester(self )
__UpperCamelCase : Optional[Any] = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=3_7 )
def _lowerCamelCase ( self :Optional[Any] ) -> Any:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCamelCase ( self :int ) -> Tuple:
return
@unittest.skip(reason="Cvt does not output attentions" )
def _lowerCamelCase ( self :List[str] ) -> Optional[Any]:
pass
@unittest.skip(reason="Cvt does not use inputs_embeds" )
def _lowerCamelCase ( self :Optional[Any] ) -> Dict:
pass
@unittest.skip(reason="Cvt does not support input and output embeddings" )
def _lowerCamelCase ( self :int ) -> int:
pass
def _lowerCamelCase ( self :List[Any] ) -> Union[str, Any]:
__UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase : List[Any] = model_class(a )
__UpperCamelCase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase : Any = [*signature.parameters.keys()]
__UpperCamelCase : List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a )
def _lowerCamelCase ( self :Dict ) -> Optional[int]:
__UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _lowerCamelCase ( self :Tuple ) -> Union[str, Any]:
def check_hidden_states_output(a :str , a :str , a :List[str] ):
__UpperCamelCase : List[str] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
__UpperCamelCase : int = model(**self._prepare_for_class(a , a ) )
__UpperCamelCase : Union[str, Any] = outputs.hidden_states
__UpperCamelCase : Any = len(self.model_tester.depth )
self.assertEqual(len(a ) , a )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase : List[str] = True
check_hidden_states_output(a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCamelCase : Dict = True
check_hidden_states_output(a , a , a )
def _lowerCamelCase ( self :Optional[int] ) -> Dict:
__UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _lowerCamelCase ( self :Optional[Any] ) -> Any:
pass
@slow
def _lowerCamelCase ( self :Optional[int] ) -> Optional[Any]:
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase : List[str] = CvtModel.from_pretrained(a )
self.assertIsNotNone(a )
def _SCREAMING_SNAKE_CASE ( ) -> List[Any]:
'''simple docstring'''
__UpperCamelCase : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_torch
@require_vision
class lowerCamelCase__ ( unittest.TestCase):
'''simple docstring'''
@cached_property
def _lowerCamelCase ( self :int ) -> List[Any]:
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _lowerCamelCase ( self :Dict ) -> int:
__UpperCamelCase : int = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(a )
__UpperCamelCase : int = self.default_image_processor
__UpperCamelCase : int = prepare_img()
__UpperCamelCase : int = image_processor(images=a , return_tensors="pt" ).to(a )
# forward pass
with torch.no_grad():
__UpperCamelCase : Dict = model(**a )
# verify the logits
__UpperCamelCase : List[Any] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , a )
__UpperCamelCase : Any = torch.tensor([0.9285, 0.9015, -0.3150] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1E-4 ) ) | 703 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase : str = {
'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : str = ['VisionEncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : List[Any] = ['TFVisionEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : int = ['FlaxVisionEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
lowercase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 94 | 0 |
'''simple docstring'''
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
# Randomized User-Agent header sent with profile requests so the scraper is
# less likely to be rejected as a bot.
# NOTE(review): this binding is immediately shadowed by the function defined
# just below (same name), so the dict is unreachable afterwards — confirm
# whether one of the two should be renamed.
lowerCAmelCase :List[Any] = {'''UserAgent''': UserAgent().random}
def lowerCamelCase ( lowerCAmelCase : Tuple ):
"""simple docstring"""
__magic_name__ : Tuple = script.contents[0]
__magic_name__ : Any = json.loads(data[data.find('{"config"' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class _lowerCamelCase :
    """Scrape the public profile information of an Instagram user.

    The profile page is fetched once in ``__init__`` and the parsed JSON is
    cached on ``self.user_data``; every property below reads from that dict.
    """

    def __init__( self , username: str ) -> None:
        # Bug fix: the constructor discarded its results into locals; the
        # f-string also referenced ``username`` while the parameter was ``_A``.
        self.url = F'https://www.instagram.com/{username}/'
        self.user_data = self.get_json()

    @staticmethod
    def _extract_user_profile( script ) -> dict:
        # The <script> tag embeds a JSON blob starting at '{"config"' and
        # terminated by a ';' that is stripped before parsing.
        data = script.contents[0]
        info = json.loads(data[data.find('{"config"' ) : -1] )
        return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]

    def get_json( self ) -> dict:
        """Fetch the profile page and return the embedded user dict."""
        # Randomized User-Agent so the request looks like a regular browser.
        headers = {'UserAgent': UserAgent().random}
        html = requests.get(self.url , headers=headers ).text
        scripts = BeautifulSoup(html , 'html.parser' ).find_all('script' )
        try:
            # The payload is usually in the fifth <script> tag...
            return self._extract_user_profile(scripts[4] )
        except (json.decoder.JSONDecodeError, KeyError):
            # ...but Instagram sometimes shifts it one tag earlier.
            return self._extract_user_profile(scripts[3] )

    def __repr__( self ) -> str:
        return F'{self.__class__.__name__}(\'{self.username}\')'

    def __str__( self ) -> str:
        return F'{self.fullname} ({self.username}) is {self.biography}'

    # Bug fix: every accessor below was named ``__lowerCAmelCase`` so each
    # definition clobbered the previous one (and was name-mangled); the names
    # restored here are the ones the module's own call sites use.
    @property
    def username( self ) -> str:
        return self.user_data["username"]

    @property
    def fullname( self ) -> str:
        return self.user_data["full_name"]

    @property
    def biography( self ) -> str:
        return self.user_data["biography"]

    @property
    def email( self ) -> str:
        return self.user_data["business_email"]

    @property
    def website( self ) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers( self ) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings( self ) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts( self ) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url( self ) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified( self ) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private( self ) -> bool:
        return self.user_data["is_private"]


# Backward-compatible alias: the rest of this module refers to the class as
# ``InstagramUser``.
InstagramUser = _lowerCamelCase
def lowerCamelCase ( lowerCAmelCase : str = "github" ):
    """Smoke-test the Instagram scraper against a live public profile.

    Requires network access; skipped on CI.

    NOTE(review): this def shadows the module-level extraction helper of the
    same name defined above — consider renaming one of the two.

    :param lowerCAmelCase: the username to fetch (extra checks only run for
        the default ``"github"`` profile).
    """
    import os

    if os.environ.get('CI' ):
        return  # test failing on GitHub Actions
    # Bug fix: the instance was bound to a throwaway local while the
    # assertions below read ``instagram_user``; also ``isinstance`` was given
    # the username string instead of the ``dict`` type.
    instagram_user = _lowerCamelCase(lowerCAmelCase )
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data , dict )
    assert instagram_user.username == lowerCAmelCase
    if lowerCAmelCase != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 12_0000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "[email protected]"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith('https://instagram.' )
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Bug fix: the instance was bound to ``lowerCAmelCase`` while every print
    # below reads ``instagram_user``; ``InstagramUser`` was also undefined, so
    # the class is referenced by its actual name.
    instagram_user = _lowerCamelCase('github')
    print(instagram_user)
    print(F'{instagram_user.number_of_posts = }')
    print(F'{instagram_user.number_of_followers = }')
    print(F'{instagram_user.number_of_followings = }')
    print(F'{instagram_user.email = }')
    print(F'{instagram_user.website = }')
    print(F'{instagram_user.profile_picture_url = }')
    print(F'{instagram_user.is_verified = }')
    print(F'{instagram_user.is_private = }')
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class _lowerCamelCase :
    """Builds configs and dummy inputs for the TF XGLM model tests below."""

    # Hooks consumed by the shared TF test mixins.
    # NOTE(review): attribute/method names restored per the transformers
    # tester convention — the scrambled originals all shared one name and
    # clobbered each other; confirm against upstream.
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = 'gelu'

    def __init__(
        self ,
        parent ,
        batch_size=14 ,
        seq_length=7 ,
        is_training=True ,
        use_input_mask=True ,
        use_labels=True ,
        vocab_size=99 ,
        d_model=32 ,
        num_hidden_layers=2 ,
        num_attention_heads=4 ,
        ffn_dim=37 ,
        activation_function='gelu' ,
        activation_dropout=0.1 ,
        attention_dropout=0.1 ,
        max_position_embeddings=512 ,
        initializer_range=0.02 ,
    ):
        # Bug fix: all parameters were named ``_A`` (a SyntaxError) and the
        # values were discarded into locals instead of set on the instance,
        # so the other methods' ``self.*`` reads failed.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        # ``get_config`` reads the model width back as ``hidden_size``.
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def get_large_model_config(self):
        return XGLMConfig.from_pretrained('facebook/xglm-564M' )

    def prepare_config_and_inputs(self):
        # Token ids are clipped to [0, 3] so they stay within a tiny range.
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        config = self.get_config()
        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        # Bug fix: ``use_cache`` and ``return_dict`` were passed the
        # undefined name ``_A``; both are enabled for these tests.
        return XGLMConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=True , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=True , )

    def prepare_config_and_inputs_for_common(self):
        # Bug fix: the tuple was unpacked into one repeated throwaway name,
        # leaving ``input_ids``/``head_mask`` undefined below.
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, head_mask) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'head_mask': head_mask,
        }
        return config, inputs_dict


# Backward-compatible alias: the test class below instantiates the tester as
# ``TFXGLMModelTester``.
TFXGLMModelTester = _lowerCamelCase
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for TF-XGLM.

    NOTE(review): the garbled source declared the class with a *duplicate*
    base (``lowercase__`` twice, a TypeError at class creation), named every
    class attribute ``A_`` (so only the last survived) and every method
    ``__lowerCAmelCase`` (so only one test was discoverable). Names below are
    restored to the ones the HF test mixins read; the two mixin base names and
    the three boolean feature flags are presumed — confirm against the
    original test module.
    """

    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False  # presumed flag names — TODO confirm
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    """Slow integration tests running real generation with facebook/xglm-564M.

    Restored: the garbled source reused the previous test class's name
    (shadowing it at module level) and named all three tests
    ``__lowerCAmelCase`` so only one was discoverable; the undefined ``_A``
    placeholders are restored to the literals the assertions imply.
    """

    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        # Greedy decoding: num_beams=1 and no sampling so the output is deterministic.
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
self.assertListEqual(_A , [non_padded_sentence, padded_sentence] ) | 561 | 1 |
"""simple docstring"""
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """Return every k-element combination of 1..n, in lexicographic order.

    Restored: both functions were named ``SCREAMING_SNAKE_CASE`` with a single
    duplicated parameter name (a SyntaxError), while the call sites already
    used ``generate_all_combinations``/``create_all_state``.
    """
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    """Backtracking helper: extend current_list until `level` picks remain, then record it."""
    if level == 0:
        # current_list is mutated below, so store a copy.
        total_list.append(current_list[:])
        return

    # Upper bound leaves enough room for the remaining `level - 1` picks.
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()
def print_all_state(total_list: list[list[int]]) -> None:
    """Print each combination on its own line, space-separated.

    Restored name: the ``__main__`` block calls ``print_all_state`` but the
    garbled definition was named ``SCREAMING_SNAKE_CASE``.
    """
    for combination in total_list:
        print(*combination)
if __name__ == "__main__":
    # Demo: print every 2-element combination of {1, 2, 3, 4}.
    # Restored names: the garbled source bound all three values to `_A`
    # while the calls below read `n`, `k` and `total_list`.
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
| 538 | """simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    """Download the Merlion demo image used by LAVIS and return it as an RGB PIL image.

    Restored: the function name (the caller uses ``load_demo_image``) and the
    undefined ``__UpperCAmelCase`` placeholder for ``stream=True``.
    """
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def create_rename_keys(config):
    """Map original LAVIS/BLIP-2 state-dict keys to their HF Transformers names.

    Covers the ViT vision encoder (per-layer norms, attention and MLP weights)
    and the QFormer embedding layernorm; remaining keys are renamed ad hoc by
    the caller. Returns a list of ``(old_key, new_key)`` pairs.
    """
    rename_keys = []
    # fmt: off

    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))

    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))

    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias"))

    # fmt: on
    return rename_keys
def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place (raises KeyError if absent).

    Restored: the garbled definition reused one parameter name three times,
    which is a SyntaxError; the caller passes (state_dict, src, dest).
    """
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    """Merge the separate q/v biases of each ViT block into a single qkv bias.

    The original checkpoint stores q_bias and v_bias separately (the k
    projection has no bias); HF expects one concatenated qkv bias per layer,
    with zeros in the k slot. Mutates ``state_dict`` in place.

    NOTE(review): the garbled source lost the assignment target of the final
    line; the HF-style key below is presumed — confirm against the original
    conversion script.
    """
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(q_bias, requires_grad=False), v_bias))
        state_dict[f"vision_model.encoder.layers.{i}.self_attn.qkv.bias"] = qkv_bias
def get_blipa_config(model_name, eos_token_id):
    """Build the HF BlipaConfig for a given LAVIS checkpoint name.

    Returns ``(config, image_size)``. COCO-finetuned checkpoints use 364px
    inputs, the others 224px. Only names containing "opt-2.7b", "opt-6.7b",
    "t5-xl" or "t5-xxl" are supported; anything else leaves ``text_config``
    unbound and raises NameError.

    Restored: the garbled definition duplicated its parameter name (a
    SyntaxError); the caller passes ``eos_token_id`` by keyword.
    """
    image_size = 364 if "coco" in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = BlipaConfig(vision_config=vision_config, text_config=text_config)

    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Convert a LAVIS BLIP-2 checkpoint to the HF Transformers format.

    Loads the original model, renames its state-dict keys, verifies that the
    HF model reproduces the original logits on a demo image, optionally saves
    the converted model/processor to ``pytorch_dump_folder_path`` and pushes
    them to the Hub.

    Restored: the garbled definition duplicated its parameter name (a
    SyntaxError) and left every ``__UpperCAmelCase`` placeholder undefined;
    names below are reconstructed from the surrounding call sites.
    """
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    # OPT/T5 tokenizers give a different id for "\n"; it becomes the eos token.
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blipa_config(model_name, eos_token_id=eos_token_id)

    hf_model = BlipaForConditionalGeneration(config).eval()

    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }

    lavis_name, model_type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=lavis_name, model_type=model_type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = BlipaProcessor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5_850, -4.4_440, -8.9_922], [-47.4_322, -5.9_143, -1.7_340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0_109, -9.8_967, -12.6_280], [-68.6_578, -12.7_191, -10.5_065]], device=device
        )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    # CLI entry point: convert one of the known BLIP-2 checkpoints.
    # Restored names: the garbled source bound everything to `_A` while the
    # statements below read `parser`, `choices` and `args`.
    parser = argparse.ArgumentParser()
    choices = [
        "blip2-opt-2.7b",
        "blip2-opt-6.7b",
        "blip2-opt-2.7b-coco",
        "blip2-opt-6.7b-coco",
        "blip2-flan-t5-xl",
        "blip2-flan-t5-xl-coco",
        "blip2-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="blip2-opt-2.7b",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    args = parser.parse_args()

    convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 538 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

# Restored constant names: all five module constants were bound to the same
# placeholder `snake_case_` (each rebinding the last), while the tokenizer
# class below reads VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES and PRETRAINED_INIT_CONFIGURATION.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_char_small": (
            "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_chinese_char_base": (
            "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_discriminator": (
            "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_generator": (
            "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "junnyu/roformer_chinese_small": 1536,
    "junnyu/roformer_chinese_base": 1536,
    "junnyu/roformer_chinese_char_small": 512,
    "junnyu/roformer_chinese_char_base": 512,
    "junnyu/roformer_small_discriminator": 128,
    "junnyu/roformer_small_generator": 128,
}

PRETRAINED_INIT_CONFIGURATION = {
    "junnyu/roformer_chinese_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_base": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_base": {"do_lower_case": True},
    "junnyu/roformer_small_discriminator": {"do_lower_case": True},
    "junnyu/roformer_small_generator": {"do_lower_case": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" RoFormer tokenizer (backed by HuggingFace's *tokenizers* library).

    It uses a BERT-style WordPiece model plus a custom Jieba pre-tokenizer for
    Chinese text. Because the Jieba pre-tokenizer is a Python object, it is
    swapped for a plain ``BertPreTokenizer`` while pickling/saving and
    reinstalled on unpickle.

    Restored: the garbled source named every method ``a`` (so only the last
    survived), reused the parameter name ``a__`` within single signatures
    (a SyntaxError) and dropped the assignment targets of several statements;
    names below are reconstructed from the attributes/locals the bodies read.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # If the serialized normalizer disagrees with the requested lowercase /
        # strip_accents options, rebuild it with the requested values.
        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        """Replace the unpicklable custom Jieba pre-tokenizer before pickling."""
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        """Reinstall the Jieba pre-tokenizer (rebuilt from the vocab) after unpickling."""
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]`` input ids."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Return token-type ids: 0 for the first segment (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Save the WordPiece vocabulary and return the written file paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        """Save with a plain BertPreTokenizer, since the custom one can't be serialized."""
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
| 592 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Restored names: both constants were bound to `snake_case_`, shadowing the
# logger that the config class below reads (`logger.info`). The archive-map
# constant name follows the HF convention — confirm against the original file.
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class XLNetConfig(PretrainedConfig):
    """Configuration class for XLNet models.

    Restored: the garbled source reused the parameter name ``a__`` for every
    ``__init__`` argument (a SyntaxError), named both the property and its
    setter ``a`` while the decorator referenced ``max_position_embeddings``,
    and left ``_A``/``a__`` placeholders for several values; parameter names
    are reconstructed from the attributes the body assigns.
    """

    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        """Construct an XLNetConfig; head dim is derived as d_model // n_head."""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        """XLNet has no fixed sequence-length limit; -1 signals 'unbounded'."""
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 592 | 1 |
'''simple docstring'''
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
# Maps a comparison-operator token (as written in a pip requirement string)
# to the corresponding rich-comparison function from the operator module.
# Restored name: the functions below read this table as `ops`, but the
# garbled source bound it to the placeholder `snake_case_`.
ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}
def __lowercase (_SCREAMING_SNAKE_CASE :List[Any] , _SCREAMING_SNAKE_CASE :Optional[Any] , _SCREAMING_SNAKE_CASE :int , _SCREAMING_SNAKE_CASE :List[Any] , _SCREAMING_SNAKE_CASE :Union[str, Any] , _SCREAMING_SNAKE_CASE :Optional[int] ):
if got_ver is None or want_ver is None:
raise ValueError(
F'''Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider'''
F''' reinstalling {pkg}.''' )
if not ops[op](version.parse(__lowercase ) , version.parse(__lowercase ) ):
raise ImportError(
F'''{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}''' )
def require_version(requirement: str, hint: Optional[str] = None) -> None:
    """Perform a runtime check of a dependency version, pip-requirement style.

    Args:
        requirement: pip-style requirement, e.g. ``"tokenizers==0.9.4"`` or a
            bare package name; ``"python"`` checks the interpreter version.
        hint: optional suggestion appended to error messages.

    Raises:
        ValueError: if the requirement string is malformed or uses an unknown operator.
        importlib.metadata.PackageNotFoundError: if the distribution is not installed.
        ImportError: if the installed version does not satisfy the requirement
            (via ``_compare_versions``).
    """
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
def require_version_core(requirement):
    """``require_version`` wrapper that appends a core-install-oriented hint on failure.

    Restored name: the body calls ``require_version`` by its real name, so
    this wrapper must be the conventionally-named core variant; the garbled
    source named it ``__lowercase`` like its siblings.
    """
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
| 704 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both module constants below are bound to the same placeholder
# name `snake_case_`, so the logger is immediately shadowed by the archive
# map. The intended names are presumably `logger` and an
# *_PRETRAINED_CONFIG_ARCHIVE_MAP constant -- confirm against the original
# transformers module before renaming; nothing in the visible chunk reads them.
snake_case_ = logging.get_logger(__name__)

snake_case_ = {
    """facebook/nllb-moe-54B""": """https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json""",
}
class NllbMoeConfig(PretrainedConfig):
    """Configuration class for NLLB-MoE (mixture-of-experts translation) models.

    Restored: the garbled source reused the parameter name ``__UpperCAmelCase``
    for every ``__init__`` argument (a SyntaxError) and used placeholder class
    names; parameter names are reconstructed from the attributes the body
    assigns and the keyword arguments forwarded to ``super().__init__``.
    """

    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=128112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 355 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Lazy-module declaration for the MobileNetV2 subpackage.
# Restored: in the garbled source every binding used the placeholder
# `SCREAMING_SNAKE_CASE` (each rebinding the last), while the final line
# reads `_import_structure`; optional-dependency entries are re-expressed as
# keyed assignments into the dict, matching the HF lazy-module pattern.
# Module and class names are reconciled with the string literals that
# _LazyModule resolves ("configuration_mobilenet_v2", "MobileNetV2Config", ...).
_import_structure = {
    "configuration_mobilenet_v2": [
        "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileNetV2Config",
        "MobileNetV2OnnxConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
        "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileNetV2ForImageClassification",
        "MobileNetV2ForSemanticSegmentation",
        "MobileNetV2Model",
        "MobileNetV2PreTrainedModel",
        "load_tf_weights_in_mobilenet_v2",
    ]


if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )

else:
    import sys

    # Replace this module with a lazy proxy; attributes resolve on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 628 |
def lowerCAmelCase( SCREAMING_SNAKE_CASE_ )-> str:
    """Return the binary representation of an integer as a ``bin()``-style string.

    Examples: 5 -> "0b101", -5 -> "-0b101", 0 -> "0b0".

    Args:
        SCREAMING_SNAKE_CASE_: the integer to convert.

    Raises:
        TypeError: if the argument is a float or a str instead of an int.
    """
    num = SCREAMING_SNAKE_CASE_
    # Reject non-int inputs explicitly.  The previous code called
    # isinstance(num, num), which itself raises TypeError for *every* input,
    # and joined str(num) (always "0" after the loop) instead of each digit.
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary = []
    while num > 0:
        # Collect bits most-significant-first by inserting at the front.
        binary.insert(0, num % 2)
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 628 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_:Union[str, Any] = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    """Build the (old_name, new_name) pairs mapping original BEiT/DiT checkpoint
    keys onto the HuggingFace ``Beit*`` parameter names.

    Args:
        config: model config; only ``config.num_hidden_layers`` is read here.
        has_lm_head: if True, include the masked-image-modeling head keys
            (mask token + final layernorm) instead of the classifier head keys.
        is_semantic: if True, original keys carry a ``backbone.`` prefix.

    Returns:
        list[tuple[str, str]]: rename pairs consumed by ``rename_key``.
    """
    # NOTE: function renamed from the obfuscated `__UpperCamelCase` to match
    # its call site in `convert_dit_checkpoint`; the obfuscated signature also
    # repeated the same parameter name, which is a SyntaxError.
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f"{prefix}cls_token", "beit.embeddings.cls_token"),
            (f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
            (f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
            (f"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
        ]
    )

    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ("mask_token", "beit.embeddings.mask_token"),
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("fc_norm.weight", "beit.pooler.layernorm.weight"),
                ("fc_norm.bias", "beit.pooler.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    """Split each layer's fused qkv projection into separate query/key/value
    entries under the HuggingFace BEiT naming scheme, mutating ``state_dict``
    in place.

    Args:
        state_dict: checkpoint state dict (modified in place).
        config: model config; reads ``num_hidden_layers`` and ``hidden_size``.
        has_lm_head: accepted for call-site compatibility; unused here.
        is_semantic: if True, original keys carry a ``backbone.`` prefix.
    """
    # NOTE: renamed from the obfuscated `__UpperCamelCase` to match its call
    # site; the obfuscated version also assigned the q/k/v slices to throwaway
    # locals, so the converted state_dict silently lost all attention weights.
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        # Fused weight rows are ordered (query, key, value), hidden_size each.
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        # BEiT's key projection has no bias, hence only q_bias and v_bias exist.
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_a = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_b = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_a
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_b
def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place.

    Renamed from the obfuscated `__UpperCamelCase` to match its call site; the
    obfuscated version popped the value into a throwaway local and never stored
    it back, deleting the entry instead of renaming it.
    """
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Download the standard COCO sanity-check image (two cats on a couch).

    Renamed from the obfuscated `__UpperCamelCase` to match its call site in
    `convert_dit_checkpoint`; `stream=True` restores the undefined
    `_lowerCAmelCase` argument so the response body is streamed into PIL.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """Convert an original DiT checkpoint into a HuggingFace BEiT-format model.

    Args:
        checkpoint_url: URL of the original ``.pth`` checkpoint.
        pytorch_dump_folder_path: directory to write the converted model to.
        push_to_hub: if True, also push model + image processor to the Hub.

    Raises:
        AssertionError: if the converted model's logits have an unexpected shape.
    """
    # NOTE: renamed from the obfuscated `__UpperCamelCase` to match the call in
    # the __main__ block; the obfuscated signature repeated one parameter name
    # (SyntaxError) and assigned config fields to throwaway locals.
    # Pre-trained (non-rvlcdip) checkpoints keep the masked-image-modeling head.
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'''Saving model to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:Tuple = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
SCREAMING_SNAKE_CASE_:Tuple = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 520 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE_:Tuple = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( BaseImageProcessor ):
    """ConvNeXT-style image processor: optional shortest-edge resize with
    crop-percentage center crop (for sizes < 384), rescale, and normalization.

    NOTE(review): the obfuscated class inherited from itself (NameError at
    class creation) and every method repeated its parameter name (SyntaxError);
    the base class and parameter/method names are restored from the imports and
    the attribute reads in the method bodies.
    """

    # Conventional BaseImageProcessor attribute naming the model input key.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize to `size["shortest_edge"]`; below 384 px, resize the shortest
        edge to shortest_edge/crop_pct then center-crop to a square."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize channels: (image - mean) / std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Run the configured pipeline (resize -> rescale -> normalize) over one
        image or a batch, returning a BatchFeature with "pixel_values"."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # Parenthesized: the original `do_resize and size is None or resample
        # is None` bound as `(do_resize and size is None) or (resample is None)`.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 520 | 1 |
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class __magic_name__ ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Fast (tiny-model, CPU-friendly) tests for the single-ControlNet img2img
    pipeline.

    NOTE(review): the obfuscated class inherited three undefined
    `lowerCamelCase__` bases and named every attribute/method identically, while
    the mixins and the bodies call `self.pipeline_class`,
    `self.get_dummy_components`, etc.; the names are restored accordingly.  The
    class name itself collides with the other `__magic_name__` classes in this
    module and should be given a unique descriptive name.
    """

    pipeline_class = StableDiffusionControlNetImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build miniature pipeline components so the tests run in seconds."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Build deterministic pipeline call kwargs for the given device."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
            generator=generator,
            device=torch.device(device),
        )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        # np.uint8 restores the nonexistent `np.uinta` from the obfuscation.
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class __magic_name__ ( PipelineTesterMixin , PipelineKarrasSchedulerTesterMixin , unittest.TestCase ):
    """Fast tests for img2img with two ControlNets (MultiControlNetModel).

    NOTE(review): restored from the obfuscated form -- undefined
    `lowerCamelCase__` bases, colliding attribute/method names,
    duplicate parameter names, nonexistent `torch.nn.Convad` and `np.uinta`,
    and an undefined `snake_case` at several call sites.  The class name still
    collides with the other `__magic_name__` classes in this module.
    """

    pipeline_class = StableDiffusionControlNetImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess

    def get_dummy_components(self):
        """Build miniature components, including two distinct ControlNets."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)

        def init_weights(m):
            # Give the (normally zero-initialized) output convs non-trivial
            # weights so each ControlNet actually influences the result.
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal(m.weight)
                m.bias.data.fill_(1.0)

        controlneta = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlneta.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        controlnetb = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnetb.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        controlnet = MultiControlNetModel([controlneta, controlnetb])

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Build deterministic call kwargs with one control image per ControlNet."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
        ]

        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def test_control_guidance_switch(self):
        """Different control_guidance_start/end settings must change the output."""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_a = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_b = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_c = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_d = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_a - output_b)) > 1e-3
        assert np.sum(np.abs(output_a - output_c)) > 1e-3
        assert np.sum(np.abs(output_a - output_d)) > 1e-3

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase ):
    """Slow integration tests for ControlNet img2img (requires GPU + network).

    NOTE(review): the two obfuscated methods shared one name, so unittest never
    ran either and `tearDown` was never invoked; names restored.  The class name
    collides with the other `__magic_name__` classes in this module.
    """

    def tearDown(self):
        # Release GPU memory between tests -- the pipelines here are full-size.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")

        pipe = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
        ).resize((512, 512))

        output = pipe(
            prompt,
            image,
            control_image=control_image,
            generator=generator,
            output_type="np",
            num_inference_steps=50,
            strength=0.6,
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy"
        )

        # Loose tolerance: cpu-offloaded fp math is only approximately reproducible.
        assert np.abs(expected_image - image).max() < 9e-2
| 454 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {
'''google/umt5-small''': '''https://huggingface.co/google/umt5-small/resolve/main/config.json''',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class __magic_name__ ( PretrainedConfig ):
    """Configuration for a UMT5 model (T5-style encoder-decoder).

    NOTE(review): restored from the obfuscated form -- the base class was the
    undefined `lowerCamelCase__` (PretrainedConfig is the only imported config
    base), the `__init__` repeated one parameter name for every argument
    (SyntaxError), and the three properties shared one name.  The class name
    collides with the Onnx config class below and should be renamed.
    """

    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250_112,
        d_model=512,
        d_kv=64,
        d_ff=1_024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        # feed_forward_proj is either "<act>" or "gated-<act>".
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        # Alias expected by generic transformers utilities.
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers
class __magic_name__ ( OnnxSeqaSeqConfigWithPast ):
    """ONNX export configuration for the seq2seq model above.

    NOTE(review): restored from the obfuscated form -- the base class was the
    undefined `lowerCamelCase__` (OnnxSeqaSeqConfigWithPast is the only imported
    ONNX base), the three properties shared one name, and the dict entries were
    assigned to throwaway locals.  Property names are pinned by the "Copied
    from ... T5OnnxConfig.inputs / default_onnx_opset" comments.
    """

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            # With a cache, the attention mask covers past + current tokens and
            # the decoder only receives the newest token.
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating the exported ONNX graph.
        return 5e-4
| 454 | 1 |
'''simple docstring'''
import re


def is_sri_lankan_phone_number(phone: str) -> bool:
    """Return True if *phone* is a valid Sri Lankan mobile number.

    Accepted prefixes: 0, 94, +94 or 0094, followed by a mobile code
    7[0|1|2|4|5|6|7|8], an optional separator (dash or space), then 7 digits.

    Renamed from the obfuscated `UpperCAmelCase` to match the call in the
    __main__ block; the obfuscated body searched the compiled pattern against
    itself via an undefined name.
    """
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$"
    )
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
__A : Any = logging.getLogger(__name__)
class __UpperCamelCase ( BertEncoder ):
    """BertEncoder variant for PABEE that can run one transformer layer at a time.

    NOTE(review): the base class was the undefined name ``lowercase__``; the
    imports above provide ``BertEncoder``, which is what PABEE's
    ``BertEncoderWithPabee`` subclasses upstream.
    """

    def a__ ( self :int ,hidden_states ,current_layer=0 ,attention_mask=None ,head_mask=None ):
        """Run only layer `current_layer` so the caller can exit early.

        Bug fix: the original signature declared the same parameter name four
        times (a SyntaxError); the names restored here are the ones the body
        and the call sites in this file actually use.
        """
        layer_outputs = self.layer[current_layer](hidden_states ,attention_mask ,head_mask[current_layer] )
        # Layer modules return a tuple; element 0 is the hidden-state tensor.
        hidden_states = layer_outputs[0]
        return hidden_states
@add_start_docstrings(
    'The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.' , BERT_START_DOCSTRING , )
class __UpperCamelCase ( BertModel ):
    """BertModel with Patience-based Early Exit (PABEE) inference.

    During inference, layer-wise classifier heads are evaluated after every
    transformer layer; once `patience` consecutive layers agree on the
    prediction, the remaining layers are skipped.

    NOTE(review): the decorator argument and the base class were the undefined
    names ``lowercase__``; the imports above provide ``BERT_START_DOCSTRING``
    and ``BertModel``, used here.  The four helper methods below all keep the
    original name ``a__`` (they shadow each other) — preserved as-is.
    """

    def __init__( self :Dict ,config ):
        super().__init__(config )
        self.encoder = BertEncoderWithPabee(config )
        self.init_weights()
        # Early-exit configuration and bookkeeping.
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.regression_threshold = 0

    def a__ ( self :Tuple ,threshold ):
        """Set the absolute-difference threshold for regression early exit."""
        self.regression_threshold = threshold

    def a__ ( self :Optional[Any] ,patience ):
        """Set the patience: exit after this many consecutive agreeing layers."""
        self.patience = patience

    def a__ ( self :Optional[int] ):
        """Reset the accumulated inference statistics."""
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def a__ ( self :List[Any] ):
        """Print average layers used per instance and the implied speed-up."""
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            F'''*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='''
            F''' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'''
        )
        print(message )

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
    def a__ ( self :Optional[Any] ,input_ids=None ,attention_mask=None ,token_type_ids=None ,position_ids=None ,head_mask=None ,inputs_embeds=None ,encoder_hidden_states=None ,encoder_attention_mask=None ,output_dropout=None ,output_layers=None ,regression=False ,):
        """PABEE forward pass.

        Returns a list of logits: one entry per layer while training, a single
        entry (the early-exit or final prediction) during inference.

        Bug fix: the original signature declared the same parameter name
        eleven times (a SyntaxError) and the decorator argument was undefined;
        the restored names are the ones the body below already references.
        """
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" )
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("""You have to specify either input_ids or inputs_embeds""" )

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape ,device=device )
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape ,dtype=torch.long ,device=device )

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask ,input_shape ,device )

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size , encoder_sequence_length , _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape ,device=device )
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask )
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask ,self.config.num_hidden_layers )

        embedding_output = self.embeddings(
            input_ids=input_ids ,position_ids=position_ids ,token_type_ids=token_type_ids ,inputs_embeds=inputs_embeds )
        encoder_outputs = embedding_output

        if self.training:
            # Training: run every layer and collect logits from each head so
            # all internal classifiers receive gradient signal.
            res = []
            for i in range(self.config.num_hidden_layers ):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs ,current_layer=i ,attention_mask=extended_attention_mask ,head_mask=head_mask )
                pooled_output = self.pooler(encoder_outputs )
                logits = output_layers[i](output_dropout(pooled_output ) )
                res.append(logits )
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output ,attention_mask=extended_attention_mask ,head_mask=head_mask ,encoder_hidden_states=encoder_hidden_states ,encoder_attention_mask=encoder_extended_attention_mask ,)
            pooled_output = self.pooler(encoder_outputs[0] )
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output )]
        else:
            # Patience-based early exit: stop once `patience` consecutive
            # layers produce the same prediction (or, for regression, values
            # within `regression_threshold` of each other).
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers ):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs ,current_layer=i ,attention_mask=extended_attention_mask ,head_mask=head_mask )
                pooled_output = self.pooler(encoder_outputs )
                logits = output_layers[i](pooled_output )
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels ) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1 )
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1 )
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels ) ):
                        patient_counter += 1
                    else:
                        patient_counter = 0
                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1
        return res
@add_start_docstrings(
    'Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. ' , BERT_START_DOCSTRING , )
class __UpperCamelCase ( BertPreTrainedModel ):
    """BERT with PABEE and one classification head per transformer layer.

    NOTE(review): the decorator argument and the base class were the undefined
    name ``lowercase__``; the imports above provide ``BERT_START_DOCSTRING``
    and ``BertPreTrainedModel``, used here.
    """

    def __init__( self :Tuple ,config ):
        super().__init__(config )
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config )
        self.dropout = nn.Dropout(config.hidden_dropout_prob )
        # One linear classifier per layer so every internal exit has a head.
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size ,self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
    def a__ ( self :Union[str, Any] ,input_ids=None ,attention_mask=None ,token_type_ids=None ,position_ids=None ,head_mask=None ,inputs_embeds=None ,labels=None ,):
        """Forward pass returning ``(loss?, last_logits)``.

        When labels are given, each layer's loss is weighted by its depth
        (layer i contributes weight i+1) so deeper heads dominate the total.

        Bug fix: the original signature declared the same parameter name seven
        times (a SyntaxError); the names restored here match the keyword
        arguments forwarded to ``self.bert`` below.
        """
        logits = self.bert(
            input_ids=input_ids ,attention_mask=attention_mask ,token_type_ids=token_type_ids ,position_ids=position_ids ,head_mask=head_mask ,inputs_embeds=inputs_embeds ,output_dropout=self.dropout ,output_layers=self.classifiers ,regression=self.num_labels == 1 ,)
        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits ):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1 ) ,labels.view(-1 ) )
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
'''simple docstring'''
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def _lowercase ( __A="" ):
    '''Return a unique path inside a fresh temporary directory.

    Args:
        __A: suffix appended to the generated file name (e.g. ``".wav"``).

    Returns:
        An absolute path ``<tmpdir>/<uuid4><suffix>`` (the file is not created).
    '''
    tmp_dir = tempfile.mkdtemp()
    # Bug fixes: use the temp directory (not the suffix argument) as the base
    # directory, call uuid.uuid4 (``uuid.uuida`` does not exist), and append
    # the actual ``__A`` parameter instead of an undefined ``suffix`` name.
    return os.path.join(tmp_dir ,str(uuid.uuid4() ) + __A )
@require_soundfile
@require_torch
class UpperCAmelCase__ ( unittest.TestCase):
    """Tests for AgentAudio: round-tripping between tensors and audio files.

    Bug fixes vs. the original: ``torch.floataa`` does not exist (restored to
    ``torch.float64``, required so the tensor dtype matches what ``sf.read``
    returns), undefined ``lowercase``/``agent_type`` placeholders replaced by
    real locals, and the unimported typing return annotations dropped.
    """

    def __lowerCamelCase ( self ):
        # Construct from a tensor: to_string() must persist the samples to a
        # file that survives deletion of the AgentAudio object.
        tensor = torch.rand(1_2 , dtype=torch.float64 ) - 0.5
        agent_type = AgentAudio(tensor )
        path = str(agent_type.to_string() )

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor , agent_type.to_raw() , atol=1E-4 ) )

        del agent_type

        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path ) )

        # Ensure that the file contains the same value as the original tensor
        new_tensor , _ = sf.read(path )
        self.assertTrue(torch.allclose(tensor , torch.tensor(new_tensor ) , atol=1E-4 ) )

    def __lowerCamelCase ( self ):
        # Construct from a path: raw samples must match the written file and
        # to_string() must return the original path.
        tensor = torch.rand(1_2 , dtype=torch.float64 ) - 0.5
        path = _lowercase(""".wav""" )
        sf.write(path , tensor , 1_6_0_0_0 )
        agent_type = AgentAudio(path )

        self.assertTrue(torch.allclose(tensor , agent_type.to_raw() , atol=1E-4 ) )
        self.assertEqual(agent_type.to_string() , path )
@require_vision
@require_torch
class UpperCAmelCase__ ( unittest.TestCase):
    """Tests for AgentImage built from a tensor, from a path, and from a PIL image.

    Bug fixes vs. the original: undefined ``lowercase``/``agent_type``
    placeholders replaced by real locals and the unimported typing return
    annotations dropped.
    """

    def __lowerCamelCase ( self ):
        # Construct from a tensor: the stored tensor must be preserved and
        # to_raw() must yield a PIL image.
        tensor = torch.randint(0 , 2_5_6 , (6_4, 6_4, 3) )
        agent_type = AgentImage(tensor )
        path = str(agent_type.to_string() )

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor , agent_type._tensor , atol=1E-4 ) )

        self.assertIsInstance(agent_type.to_raw() , Image.Image )

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path ) )

    def __lowerCamelCase ( self ):
        # Construct from a path: to_string() should point at the same file.
        path = Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png"""
        image = Image.open(path )
        agent_type = AgentImage(path )

        self.assertTrue(path.samefile(agent_type.to_string() ) )
        self.assertTrue(image == agent_type.to_raw() )

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path ) )

    def __lowerCamelCase ( self ):
        # Construct from a PIL image: a new file is written, so to_string()
        # must NOT be the original fixture path.
        path = Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png"""
        image = Image.open(path )
        agent_type = AgentImage(image )

        self.assertFalse(path.samefile(agent_type.to_string() ) )
        self.assertTrue(image == agent_type.to_raw() )

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path ) )
class UpperCAmelCase__ ( unittest.TestCase):
    """Tests for AgentText: the text agent type must round-trip its string.

    Bug fixes vs. the original: the undefined ``lowercase``/``agent_type``
    placeholders are replaced by real locals and the unimported typing return
    annotation dropped.
    """

    def __lowerCamelCase ( self ):
        text = """Hey!"""
        agent_type = AgentText(text )

        # to_string(), to_raw() and the object itself all compare equal to the
        # wrapped string.
        self.assertEqual(text , agent_type.to_string() )
        self.assertEqual(text , agent_type.to_raw() )
        self.assertEqual(text , agent_type )
| 601 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase):
    """Test suite for the RoBERTa tokenizers (slow and fast), covering BPE
    tokenization on a tiny fixture vocab, special-token handling, mask-token
    spacing, and offset-mapping behavior under `add_prefix_space`/`trim_offsets`.

    NOTE(review): this block looks machine-transformed and is not runnable
    as-is — every method is named ``__lowerCamelCase`` (later defs shadow
    earlier ones), the four class attributes reuse one name, several
    zero-argument methods reference an undefined name ``lowercase`` where the
    upstream test presumably had literal True/False arguments, and the
    ``-> Tuple``-style return annotations reference unimported typing names.
    Code is left byte-identical; only documentation was added.
    """

    # NOTE(review): four bindings of the same (mangled) attribute name; only
    # the last assignment survives on the class.
    __SCREAMING_SNAKE_CASE = RobertaTokenizer
    __SCREAMING_SNAKE_CASE = RobertaTokenizerFast
    __SCREAMING_SNAKE_CASE = True
    __SCREAMING_SNAKE_CASE = {'''cls_token''': '''<s>'''}

    # setUp: write a tiny BPE vocab + merges fixture into tmpdirname.
    def __lowerCamelCase ( self ) -> Tuple:
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        __UpperCamelCase = [
            """l""",
            """o""",
            """w""",
            """e""",
            """r""",
            """s""",
            """t""",
            """i""",
            """d""",
            """n""",
            """\u0120""",
            """\u0120l""",
            """\u0120n""",
            """\u0120lo""",
            """\u0120low""",
            """er""",
            """\u0120lowest""",
            """\u0120newer""",
            """\u0120wider""",
            """<unk>""",
        ]
        __UpperCamelCase = dict(zip(lowercase , range(len(lowercase ) ) ) )
        __UpperCamelCase = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        __UpperCamelCase = {"""unk_token""": """<unk>"""}
        __UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        __UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(lowercase ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(lowercase ) )

    # Factory for the slow tokenizer built from the fixture files.
    def __lowerCamelCase ( self , **lowercase ) -> Any:
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase )

    # Factory for the fast (Rust) tokenizer built from the fixture files.
    def __lowerCamelCase ( self , **lowercase ) -> List[Any]:
        kwargs.update(self.special_tokens_map )
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **lowercase )

    # Provide an input/expected-output text pair for the common test mixin.
    def __lowerCamelCase ( self , lowercase ) -> Optional[int]:
        __UpperCamelCase = """lower newer"""
        __UpperCamelCase = """lower newer"""
        return input_text, output_text

    # Full-tokenizer check: BPE splits of "lower newer" and their ids.
    def __lowerCamelCase ( self ) -> List[str]:
        __UpperCamelCase = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        __UpperCamelCase = """lower newer"""
        __UpperCamelCase = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
        __UpperCamelCase = tokenizer.tokenize(lowercase )  # , add_prefix_space=True)
        self.assertListEqual(lowercase , lowercase )
        __UpperCamelCase = tokens + [tokenizer.unk_token]
        __UpperCamelCase = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , lowercase )

    # Integration check of encode() against known roberta-base ids.
    def __lowerCamelCase ( self ) -> Any:
        __UpperCamelCase = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=lowercase ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] )
        self.assertListEqual(
            tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=lowercase ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , )

    # Special-token building: encode-with-specials must equal manual assembly
    # via build_inputs_with_special_tokens, for single and paired sequences.
    @slow
    def __lowerCamelCase ( self ) -> str:
        __UpperCamelCase = self.tokenizer_class.from_pretrained("""roberta-base""" )
        __UpperCamelCase = tokenizer.encode("""sequence builders""" , add_special_tokens=lowercase )
        __UpperCamelCase = tokenizer.encode("""multi-sequence build""" , add_special_tokens=lowercase )
        __UpperCamelCase = tokenizer.encode(
            """sequence builders""" , add_special_tokens=lowercase , add_prefix_space=lowercase )
        __UpperCamelCase = tokenizer.encode(
            """sequence builders""" , """multi-sequence build""" , add_special_tokens=lowercase , add_prefix_space=lowercase )
        __UpperCamelCase = tokenizer.build_inputs_with_special_tokens(lowercase )
        __UpperCamelCase = tokenizer.build_inputs_with_special_tokens(lowercase , lowercase )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    # Spacing behavior: prefix-space handling and the left-stripped <mask>.
    def __lowerCamelCase ( self ) -> Union[str, Any]:
        __UpperCamelCase = self.get_tokenizer()
        __UpperCamelCase = """Encode this sequence."""
        __UpperCamelCase = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]

        # Testing encoder arguments
        __UpperCamelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase , add_prefix_space=lowercase )
        __UpperCamelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertNotEqual(lowercase , lowercase )
        __UpperCamelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase , add_prefix_space=lowercase )
        __UpperCamelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertEqual(lowercase , lowercase )

        tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
        __UpperCamelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
        __UpperCamelCase = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
        self.assertNotEqual(lowercase , lowercase )

        # Testing spaces after special tokens
        __UpperCamelCase = """<mask>"""
        tokenizer.add_special_tokens(
            {"""mask_token""": AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase )} )  # mask token has a left space
        __UpperCamelCase = tokenizer.convert_tokens_to_ids(lowercase )
        __UpperCamelCase = """Encode <mask> sequence"""
        __UpperCamelCase = """Encode <mask>sequence"""
        __UpperCamelCase = tokenizer.encode(lowercase )
        __UpperCamelCase = encoded.index(lowercase )
        __UpperCamelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(lowercase , lowercase )
        __UpperCamelCase = tokenizer.encode(lowercase )
        __UpperCamelCase = encoded.index(lowercase )
        __UpperCamelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(lowercase , lowercase )

    # Intentionally skipped in the upstream suite.
    def __lowerCamelCase ( self ) -> Any:
        pass

    # Slow vs. fast parity: token_type_ids, attention_mask and the handling
    # of the space before <mask> must agree between implementations.
    def __lowerCamelCase ( self ) -> Dict:
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                __UpperCamelCase = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase )
                __UpperCamelCase = self.tokenizer_class.from_pretrained(lowercase , **lowercase )
                __UpperCamelCase = """A, <mask> AllenNLP sentence."""
                __UpperCamelCase = tokenizer_r.encode_plus(lowercase , add_special_tokens=lowercase , return_token_type_ids=lowercase )
                __UpperCamelCase = tokenizer_p.encode_plus(lowercase , add_special_tokens=lowercase , return_token_type_ids=lowercase )

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )

                __UpperCamelCase = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
                __UpperCamelCase = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
                self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )

                self.assertSequenceEqual(
                    lowercase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
                self.assertSequenceEqual(
                    lowercase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )

    # The serialized pre-tokenizer/post-processor must record the
    # add_prefix_space / trim_offsets values they were created with.
    def __lowerCamelCase ( self ) -> List[Any]:
        for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
            __UpperCamelCase = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
            __UpperCamelCase = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
            __UpperCamelCase = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
            self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , lowercase )
            self.assertEqual(post_processor_state["""add_prefix_space"""] , lowercase )
            self.assertEqual(post_processor_state["""trim_offsets"""] , lowercase )

    def __lowerCamelCase ( self ) -> Optional[Any]:
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                __UpperCamelCase = """hello"""  # `hello` is a token in the vocabulary of `pretrained_name`
                __UpperCamelCase = f"{text_of_1_token} {text_of_1_token}"

                # No leading space: offsets for each combination of the two flags.
                __UpperCamelCase = self.rust_tokenizer_class.from_pretrained(
                    lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
                __UpperCamelCase = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowercase ) + 1, len(lowercase ) + 1 + len(lowercase )) , )

                __UpperCamelCase = self.rust_tokenizer_class.from_pretrained(
                    lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
                __UpperCamelCase = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowercase ) + 1, len(lowercase ) + 1 + len(lowercase )) , )

                __UpperCamelCase = self.rust_tokenizer_class.from_pretrained(
                    lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
                __UpperCamelCase = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowercase ), len(lowercase ) + 1 + len(lowercase )) , )

                __UpperCamelCase = self.rust_tokenizer_class.from_pretrained(
                    lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
                __UpperCamelCase = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowercase ), len(lowercase ) + 1 + len(lowercase )) , )

                # With a leading space in the text.
                __UpperCamelCase = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #    encoding.offset_mapping[1],
                #    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                __UpperCamelCase = self.rust_tokenizer_class.from_pretrained(
                    lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
                __UpperCamelCase = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowercase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(lowercase ) + 1, 1 + len(lowercase ) + 1 + len(lowercase )) , )

                __UpperCamelCase = self.rust_tokenizer_class.from_pretrained(
                    lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
                __UpperCamelCase = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(lowercase ), 1 + len(lowercase ) + 1 + len(lowercase )) , )

                __UpperCamelCase = self.rust_tokenizer_class.from_pretrained(
                    lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
                __UpperCamelCase = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(lowercase ), 1 + len(lowercase ) + 1 + len(lowercase )) , )
| 601 | 1 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
A = 8
def UpperCamelCase_ ( lowerCamelCase : torch.Tensor , bits : int = 8 ) -> torch.Tensor:
    """Convert an image tensor with values in [0, 1] to a bit tensor in {-1, 1}.

    Args:
        lowerCamelCase: float image tensor of shape (b, c, h, w), values in [0, 1].
        bits: bits per channel (default 8, i.e. one byte per channel).

    Returns:
        Float tensor of shape (b, c * bits, h, w) with entries -1.0 or 1.0
        (most-significant bit first within each channel group).
    """
    # Bug fix: the original declared the same parameter name twice (a
    # SyntaxError) and defaulted the bit count to the undefined name `BITS`.
    # The einops rearrange/reduce calls are replaced by equivalent pure-torch
    # reshapes/broadcasts.
    x = (lowerCamelCase * 255).int().clamp(0 , 255 )
    # Per-bit masks, most significant bit first: [2^(bits-1), ..., 2, 1].
    mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=x.device )
    # Broadcast (b, c, 1, h, w) against (bits, 1, 1) to test each bit.
    bit_planes = ((x.unsqueeze(2 ) & mask.view(-1 , 1 , 1 )) != 0).float()
    b , c , d , h , w = bit_planes.shape
    bit_planes = bit_planes.reshape(b , c * d , h , w )
    # Map {0, 1} -> {-1, 1}.
    return bit_planes * 2 - 1
def UpperCamelCase_ ( lowerCamelCase : torch.Tensor , bits : int = 8 ) -> torch.Tensor:
    """Convert a sign-coded bit tensor (positive = 1, else 0) back to floats in [0, 1].

    Args:
        lowerCamelCase: tensor of shape (b, c * bits, h, w); entries > 0 count as set bits.
        bits: bits per channel group (default 8).

    Returns:
        Float tensor of shape (b, c, h, w) with values in [0, 1].
    """
    # Bug fix: the original declared the same parameter name twice (a
    # SyntaxError) and defaulted the bit count to the undefined name `BITS`.
    # einops rearrange/reduce are replaced by an equivalent view + sum.
    x = (lowerCamelCase > 0).int()
    # Per-bit weights, most significant bit first.
    mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=x.device , dtype=torch.int32 )
    b , cd , h , w = x.shape
    planes = x.view(b , cd // bits , bits , h , w )
    # Weighted sum over the bit dimension reconstructs the byte value.
    dec = (planes * mask.view(1 , 1 , -1 , 1 , 1 )).sum(dim=2 )
    return (dec / 255).clamp(0.0 , 1.0 )
def UpperCamelCase_ ( self : Optional[Any] , lowerCamelCase : torch.FloatTensor , lowerCamelCase : int , lowerCamelCase : torch.FloatTensor , lowerCamelCase : float = 0.0 , lowerCamelCase : bool = True , lowerCamelCase : Union[str, Any]=None , lowerCamelCase : bool = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
"""simple docstring"""
if self.num_inference_steps is None:
raise ValueError(
'''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
# Ideally, read DDIM paper in-detail understanding
# Notation (<variable name> -> <name in paper>
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
__magic_name__ : List[str] = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
__magic_name__ : List[Any] = self.alphas_cumprod[timestep]
__magic_name__ : List[Any] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
__magic_name__ : int = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__magic_name__ : str = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
__magic_name__ : int = self.bit_scale
if self.config.clip_sample:
__magic_name__ : Union[str, Any] = torch.clamp(lowerCamelCase , -scale , lowerCamelCase )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
__magic_name__ : Optional[int] = self._get_variance(lowerCamelCase , lowerCamelCase )
__magic_name__ : Any = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
__magic_name__ : Any = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__magic_name__ : List[str] = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__magic_name__ : Dict = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
__magic_name__ : Dict = model_output.device if torch.is_tensor(lowerCamelCase ) else '''cpu'''
__magic_name__ : List[Any] = torch.randn(model_output.shape , dtype=model_output.dtype , generator=lowerCamelCase ).to(lowerCamelCase )
__magic_name__ : Tuple = self._get_variance(lowerCamelCase , lowerCamelCase ) ** 0.5 * eta * noise
__magic_name__ : Union[str, Any] = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=lowerCamelCase , pred_original_sample=lowerCamelCase )
def UpperCamelCase_ ( self : Dict , lowerCamelCase : torch.FloatTensor , lowerCamelCase : int , lowerCamelCase : torch.FloatTensor , lowerCamelCase : int="epsilon" , lowerCamelCase : int=None , lowerCamelCase : bool = True , ) -> Union[DDPMSchedulerOutput, Tuple]:
"""simple docstring"""
__magic_name__ : List[Any] = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
__magic_name__ , __magic_name__ : int = torch.split(lowerCamelCase , sample.shape[1] , dim=1 )
else:
__magic_name__ : Union[str, Any] = None
# 1. compute alphas, betas
__magic_name__ : int = self.alphas_cumprod[t]
__magic_name__ : Tuple = self.alphas_cumprod[t - 1] if t > 0 else self.one
__magic_name__ : str = 1 - alpha_prod_t
__magic_name__ : Dict = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
__magic_name__ : Dict = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
__magic_name__ : Optional[Any] = model_output
else:
raise ValueError(f"""Unsupported prediction_type {prediction_type}.""" )
# 3. Clip "predicted x_0"
__magic_name__ : str = self.bit_scale
if self.config.clip_sample:
__magic_name__ : Any = torch.clamp(lowerCamelCase , -scale , lowerCamelCase )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__magic_name__ : List[str] = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
__magic_name__ : List[str] = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__magic_name__ : List[str] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
__magic_name__ : Dict = 0
if t > 0:
__magic_name__ : Dict = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=lowerCamelCase ).to(model_output.device )
__magic_name__ : str = (self._get_variance(lowerCamelCase , predicted_variance=lowerCamelCase ) ** 0.5) * noise
__magic_name__ : Optional[Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=lowerCamelCase , pred_original_sample=lowerCamelCase )
class _UpperCamelCase ( DiffusionPipeline ):
    """Bit Diffusion pipeline: runs the diffusion process in a {-1, 1} bit
    encoding of the image and decodes back to [0, 1] floats at the end.

    Bug fixes vs. the original: both methods declared the same parameter name
    repeatedly (SyntaxErrors), the base class was the undefined name
    ``lowerCamelCase__`` (the file imports ``DiffusionPipeline``), and the
    scheduler-type test was the nonsensical ``isinstance(x, x)``.
    """

    def __init__( self : Any , unet : UNetaDConditionModel , scheduler : Union[DDIMScheduler, DDPMScheduler] , bit_scale : Optional[float] = 1.0 , ) -> Optional[Any]:
        super().__init__()
        self.bit_scale = bit_scale
        # Patch the scheduler's step with the bit-space variant matching its
        # type.  NOTE(review): ``ddim_bit_scheduler_step`` /
        # ``ddpm_bit_scheduler_step`` refer to the module-level step functions
        # defined above — confirm their bindings resolve in this module.
        scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler , DDIMScheduler ) else ddpm_bit_scheduler_step
        )
        self.register_modules(unet=scheduler if False else unet , scheduler=scheduler )

    @torch.no_grad()
    def __call__( self : str , height : Optional[int] = 256 , width : Optional[int] = 256 , num_inference_steps : Optional[int] = 50 , generator : Optional[torch.Generator] = None , batch_size : Optional[int] = 1 , output_type : Optional[str] = "pil" , return_dict : bool = True , **kwargs : int , ) -> Union[Tuple, ImagePipelineOutput]:
        # Start from Gaussian noise, encode it into bit space, and scale.
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width) , generator=generator , )
        latents = decimal_to_bits(latents ) * self.bit_scale
        latents = latents.to(self.device )

        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # predict the noise residual
            noise_pred = self.unet(latents , t ).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents ).prev_sample

        # Decode the final bit representation back to [0, 1] image values.
        image = bits_to_decimal(latents )

        if output_type == "pil":
            image = self.numpy_to_pil(image )

        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
| 147 |
"""simple docstring"""
def UpperCamelCase_ ( lowerCamelCase : float ) -> float:
"""simple docstring"""
return 10 - x * x
def UpperCamelCase_ ( lowerCamelCase : float , lowerCamelCase : float ) -> float:
"""simple docstring"""
if equation(lowerCamelCase ) * equation(lowerCamelCase ) >= 0:
raise ValueError('''Wrong space!''' )
__magic_name__ : int = a
while (b - a) >= 0.0_1:
# Find middle point
__magic_name__ : str = (a + b) / 2
# Check if middle point is root
if equation(lowerCamelCase ) == 0.0:
break
# Decide the side to repeat the steps
if equation(lowerCamelCase ) * equation(lowerCamelCase ) < 0:
__magic_name__ : Union[str, Any] = c
else:
__magic_name__ : List[str] = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 147 | 1 |
"""Greedy activity selection: print a maximum-size set of mutually compatible activities."""


def print_max_activities(start: list, finish: list) -> None:
    """Print the indices of a maximum set of non-overlapping activities.

    `finish` must be sorted in non-decreasing order for the greedy choice to be valid.

    >>> start = [1, 3, 0, 5, 8, 5]
    >>> finish = [2, 4, 6, 7, 9, 9]
    >>> print_max_activities(start, finish)
    The following activities are selected:
    0,1,3,4,
    """
    n = len(finish)
    print("The following activities are selected:")
    # The first activity is always selected
    i = 0
    print(i, end=",")
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
| 566 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Lazy import structure: submodule name -> list of public symbols it provides.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # sentencepiece is optional; without it the tokenizer simply isn't exported.
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]


if TYPE_CHECKING:
    # Static type checkers see the real imports.
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 393 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure: submodule name -> list of public symbols it provides.
_import_structure = {
    "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch is optional; without it the modeling classes simply aren't exported.
    pass
else:
    _import_structure["modeling_lilt"] = [
        "LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LiltForQuestionAnswering",
        "LiltForSequenceClassification",
        "LiltForTokenClassification",
        "LiltModel",
        "LiltPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lilt import (
            LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
            LiltForQuestionAnswering,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltModel,
            LiltPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 323 |
'''simple docstring'''
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Return the most likely sequence of hidden states for the observations.

    Implements the Viterbi dynamic-programming algorithm for a discrete HMM.

    >>> viterbi(
    ...     ["normal", "cold", "dizzy"],
    ...     ["healthy", "sick"],
    ...     {"healthy": 0.6, "sick": 0.4},
    ...     {"healthy": {"healthy": 0.7, "sick": 0.3}, "sick": {"healthy": 0.4, "sick": 0.6}},
    ...     {"healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    ...      "sick": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6}},
    ... )
    ['healthy', 'healthy', 'sick']
    """
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()

    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Validate all viterbi() inputs, raising ValueError on the first problem."""
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Reject any empty/falsy parameter."""
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    """Both spaces must be lists of strings."""
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    """Ensure `_object` is a list of strings; `var_name` is used in error messages."""
    if not isinstance(_object, list):
        raise ValueError(f"{var_name} must be a list")
    else:
        for x in _object:
            if not isinstance(x, str):
                raise ValueError(f"{var_name} must be a list of strings")


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Validate the three probability tables."""
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    """Ensure `_object` is a dict of dicts of floats."""
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    """Ensure `_object` is a dict with string keys and `value_type` values."""
    if not isinstance(_object, dict):
        raise ValueError(f"{var_name} must be a dict")
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f"{var_name} all keys must be strings")
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        raise ValueError(f"{var_name} {nested_text}all values must be {value_type.__name__}")


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 323 | 1 |
"""simple docstring"""
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
__snake_case , __snake_case , __snake_case = False, False, False
@dataclass
class Audio:
    """Audio feature: stores audio as a path, raw bytes, or a decoded float array.

    NOTE(review): obfuscation had collapsed every dataclass field onto the name `_a`
    and the class name onto `_SCREAMING_SNAKE_CASE`; names are restored from the
    attributes the methods actually read (`self.sampling_rate`, `self.mono`,
    `self.decode`, `self.pa_type`) and from the internal `Audio().encode_example(...)`
    call below — confirm against upstream `datasets.features.Audio`.
    """

    # Target sampling rate; if set, decoded audio is resampled to it.
    sampling_rate: Optional[int] = None
    # Whether decoded audio is down-mixed to a single channel.
    mono: bool = True
    # Whether decode_example() is allowed to decode raw data into an array.
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        # The feature's Arrow storage type: struct of optional bytes + optional path.
        return self.pa_type

    def encode_example(self, value) -> dict:
        """Encode `value` (path str, raw bytes, or dict) into a storable {bytes, path} dict."""
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767
                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )

    def decode_example(self, value: dict, token_per_repo_id: Optional[Dict[str, Any]] = None) -> dict:
        """Decode a stored {bytes, path} dict into {path, array, sampling_rate}."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")
        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err

        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )

        if file is None:
            # Remote path: resolve a per-repo auth token (if any) before opening.
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None

            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)

        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten into raw sub-features; only legal when decoding is disabled."""
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }

    def cast_storage(self, storage) -> pa.StructArray:
        """Cast an Arrow array of any compatible layout to this feature's struct type."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            # Structs carrying decoded arrays are re-encoded to wav bytes.
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Read every referenced file into bytes so the table is self-contained."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        # Keep only the basename: the absolute location is no longer meaningful once embedded.
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
# One non-default value per common `PretrainedConfig` kwarg; the tests below check
# this table stays complete and that no entry accidentally equals the default.
# (Was bound to an obfuscated name while the tests reference `config_common_kwargs`.)
config_common_kwargs = {
    "return_dict": False,
    "output_hidden_states": True,
    "output_attentions": True,
    "torchscript": True,
    "torch_dtype": "float16",
    "use_bfloat16": True,
    "tf_legacy_loss": True,
    "pruned_heads": {"a": 1},
    "tie_word_embeddings": False,
    "is_decoder": True,
    "cross_attention_hidden_size": 128,
    "add_cross_attention": True,
    "tie_encoder_decoder": True,
    "max_length": 50,
    "min_length": 3,
    "do_sample": True,
    "early_stopping": True,
    "num_beams": 3,
    "num_beam_groups": 3,
    "diversity_penalty": 0.5,
    "temperature": 2.0,
    "top_k": 10,
    "top_p": 0.7,
    "typical_p": 0.2,
    "repetition_penalty": 0.8,
    "length_penalty": 0.8,
    "no_repeat_ngram_size": 5,
    "encoder_no_repeat_ngram_size": 5,
    "bad_words_ids": [1, 2, 3],
    "num_return_sequences": 3,
    "chunk_size_feed_forward": 5,
    "output_scores": True,
    "return_dict_in_generate": True,
    "forced_bos_token_id": 2,
    "forced_eos_token_id": 3,
    "remove_invalid_values": True,
    "architectures": ["BertModel"],
    "finetuning_task": "translation",
    "id2label": {0: "label"},
    "label2id": {"label": "0"},
    "tokenizer_class": "BertTokenizerFast",
    "prefix": "prefix",
    "bos_token_id": 6,
    "pad_token_id": 7,
    "eos_token_id": 8,
    "sep_token_id": 9,
    "decoder_start_token_id": 10,
    "exponential_decay_length_penalty": (5, 1.01),
    "suppress_tokens": [0, 1],
    "begin_suppress_tokens": 2,
    "task_specific_params": {"translation": "some_params"},
    "problem_type": "regression",
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    """Staging-endpoint tests for pushing configurations to the Hugging Face Hub.

    NOTE(review): obfuscation had stripped the `test_` prefixes and the
    `cls._token` assignment; both are restored here (the methods reference
    `self._token`, and unittest only discovers `test_*` methods) — confirm
    against the upstream transformers test file.
    """

    @classmethod
    def setUpClass(cls):
        # Store the token so test methods can authenticate, and persist it for hub calls.
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        # Best-effort cleanup of repos the tests may have created.
        try:
            delete_repo(token=cls._token, repo_id="test-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-config-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-config")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
    """Behavioural tests for `PretrainedConfig` loading, updating and versioning.

    NOTE(review): obfuscation had stripped the `test_` prefixes and the targets of
    several attribute assignments (e.g. `configuration.hidden_size = 2`,
    `...configuration_utils.__version__ = ...`); they are restored here from the
    assertions that follow them — confirm against the upstream transformers test file.
    A trailing dataset-separator artifact (`| 200 | 1 |`) that made the original a
    SyntaxError has been removed.
    """

    def test_config_from_string(self):
        c = GPTaConfig()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")

    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}."
            )

    def test_from_pretrained_subfolder(self):
        # NOTE(review): the expected exception type was obfuscated away; transformers
        # raises OSError for a missing config at the repo root — confirm.
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")

        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")

        self.assertIsNotNone(config)

    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json"
        )

    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)

    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    """A pile of cards for patience sorting, compared by its top (last) element."""

    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    """Sort `collection` in place with patience sort and return it.

    Elements are dealt onto piles (each pile is non-increasing), then the piles
    are merged with a heap-based k-way merge.

    >>> patience_sort([1, 9, 5, 21, 17, 6])
    [1, 5, 6, 9, 17, 21]
    """
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)

    # use a heap-based merge to merge stack efficiently; write back in place so
    # the caller's list is actually sorted (the obfuscated version dropped this).
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(patience_sort(unsorted))
| 367 |
# 27-step diffusion sampling schedule (timesteps descending from 999 to 0).
# NOTE(review): obfuscation rebinds the same name for every table in this file,
# so each assignment overwrites the previous one — only the last table survives.
# The values match the DeepFloyd-IF "fast27" schedule in diffusers — TODO confirm
# the original, distinct constant names.
UpperCAmelCase_ : list[int] = [
    999,
    800,
    799,
    600,
    599,
    500,
    400,
    399,
    377,
    355,
    333,
    311,
    288,
    266,
    244,
    222,
    200,
    199,
    177,
    155,
    133,
    111,
    88,
    66,
    44,
    22,
    0,
]
# 27-step diffusion sampling schedule (timesteps descending from 999 to 0).
# NOTE(review): rebinds the same obfuscated name as the previous table, overwriting it.
# The values match the DeepFloyd-IF "smart27" schedule in diffusers — TODO confirm.
UpperCAmelCase_ : list[int] = [
    999,
    976,
    952,
    928,
    905,
    882,
    858,
    857,
    810,
    762,
    715,
    714,
    572,
    429,
    428,
    286,
    285,
    238,
    190,
    143,
    142,
    118,
    95,
    71,
    47,
    24,
    0,
]
UpperCAmelCase_ : int = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
UpperCAmelCase_ : Dict = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
UpperCAmelCase_ : int = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
# 27-step diffusion sampling schedule (timesteps descending from 999 to 0).
# NOTE(review): rebinds the same obfuscated name as the previous tables, overwriting them.
# The values match the DeepFloyd-IF "super27" schedule in diffusers — TODO confirm.
UpperCAmelCase_ : list[int] = [
    999,
    991,
    982,
    974,
    966,
    958,
    950,
    941,
    933,
    925,
    916,
    908,
    900,
    899,
    874,
    850,
    825,
    800,
    799,
    700,
    600,
    500,
    400,
    300,
    200,
    100,
    0,
]
UpperCAmelCase_ : Any = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
UpperCAmelCase_ : Any = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 367 | 1 |
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int) -> dict:
    """Measure qubit 0 into classical bit 0 on the Aer simulator.

    Builds a circuit with ``qubits`` quantum and ``classical_bits`` classical
    bits, runs it for 1000 shots, and returns the counts histogram.

    NOTE(review): the original def was named ``_lowerCAmelCase`` with BOTH
    parameters also named ``_lowerCAmelCase`` (a SyntaxError) and a body that
    referenced the undefined name ``_SCREAMING_SNAKE_CASE``; the ``__main__``
    guard below calls ``single_qubit_measure``, so the function is restored to
    that name with distinct parameter names.
    """
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f'''Total count for various states are: {single_qubit_measure(1, 1)}''')
| 371 |
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
# Module-level logger for this file (obfuscated variable name; never read below).
lowercase = get_logger(__name__)
class __lowerCamelCase:
    """Mock download manager for dataset tests: instead of fetching real data,
    it resolves every URL to a file inside a local/remote ``dummy_data.zip``
    fixture.

    NOTE(review): in the obfuscated original every method was named ``a_`` (so
    each definition shadowed the previous one) and ``__init__`` declared seven
    parameters all named ``a__`` (a SyntaxError).  Method and attribute names
    below are restored from the names the class itself references internally
    (``self.download_and_extract``, ``self.create_dummy_data_dict``,
    ``self.dummy_file_name``, ``self.datasets_scripts_dir``, ...); the class
    name is left unchanged.
    """

    # Name of the folder inside the zip that holds the dummy fixtures.
    dummy_file_name = "dummy_data"
    # Repository directory containing the per-dataset scripts.
    datasets_scripts_dir = "datasets"
    # NOTE(review): this boolean class attribute is never read inside this
    # class; ``is_local`` is the presumed name — confirm against external callers.
    is_local = False

    def __init__(
        self,
        dataset_name,
        config,
        version,
        cache_dir=None,
        use_local_dummy_data=False,
        load_existing_dummy_data=True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        """Record where the dummy data lives and how it should be resolved.

        NOTE(review): parameter names/order are inferred from the attribute
        assignments in the body (all seven were named ``a__`` originally) —
        confirm against callers.
        """
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None

    @property
    def dummy_file(self):
        # Lazily download/extract the dummy data and cache the resulting path.
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        # Relative folder that holds the dummy zip for this config/version.
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        # Relative path of the dummy zip archive.
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        """Fetch the dummy zip (locally or from GitHub), extract it, and return
        the path of the extracted ``dummy_data`` directory."""
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        # NOTE(review): the original keyword values for extract_compressed_file /
        # force_extract were lost to obfuscation; True/True is the presumed
        # intent (always extract the zip) — confirm.
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        # Path of the dummy zip inside a local checkout of the datasets scripts.
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        # URL of the dummy zip on the GitHub repository (computed once).
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # NOTE(review): this property was anonymous in the obfuscated original;
        # ``manual_dir`` is the presumed name — confirm against callers.
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])

    def download_and_extract(self, data_url, *args):
        """Resolve ``data_url`` (str, list/tuple, or dict of URLs) to the
        corresponding dummy-data path(s)."""
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        # Alias of download_and_extract for the DownloadManager interface.
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        # ``custom_download`` is deliberately ignored in the mock.
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        # Dummy data is pre-extracted; return the path unchanged.
        return path

    def get_recorded_sizes_checksums(self):
        # No real downloads happen, so there is nothing to record.
        return {}

    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        """Map each key of ``data_url`` to the matching dummy file path(s)."""
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        """Map each URL of the list to a dummy file path, collapsing sharded
        URL patterns down to a single repeated entry."""
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url)
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        """Map a single URL to its dummy file path."""
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        # NOTE(review): anonymous no-op in the original; presumed name — confirm.
        pass

    def manage_extracted_files(self):
        # NOTE(review): anonymous no-op in the original; presumed name — confirm.
        pass

    def iter_archive(self, path):
        """Yield ``(relative_posix_path, file_object)`` pairs for every member
        of the (already extracted) dummy archive under ``path``."""

        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
                for member in members:
                    if member.startswith(relative_path.as_posix()):
                        yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        """Yield every regular file under ``paths`` (a path or list of paths),
        skipping hidden/dunder names, in deterministic sorted order."""
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
| 211 | 0 |
"""simple docstring"""
import flax.linen as nn
import jax
import jax.numpy as jnp
class _lowerCAmelCase(nn.Module):
    """2x nearest-neighbor upsampling followed by a 3x3 same-padding conv.

    Input/output layout is NHWC (the shape unpack below reads
    ``batch, height, width, channels``).

    NOTE(review): restored from an obfuscated original in which both dataclass
    fields were named ``__lowerCAmelCase`` (so ``out_channels`` was lost), the
    setup method was misnamed (Flax only invokes ``setup``), the submodule was
    assigned to a throwaway local instead of ``self.conv``, and the shape was
    unpacked into one repeated name.  ``jnp.floataa`` (nonexistent) is
    corrected to ``jnp.float32``.  This class name is also shared by the two
    classes below, so at module level only the last definition survives.
    """

    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # 3x3 stride-1 conv applied after the spatial resize.
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method='''nearest''',
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states
class _lowerCAmelCase(nn.Module):
    """2x spatial downsampling via a 3x3 stride-2 convolution (NHWC).

    NOTE(review): restored from an obfuscated original in which both dataclass
    fields shared one name, the setup method was misnamed (Flax only invokes
    ``setup``), and the conv was assigned to a throwaway local instead of
    ``self.conv`` (which ``__call__`` reads).  ``jnp.floataa`` (nonexistent)
    is corrected to ``jnp.float32``.  The class name collides with its two
    siblings in this snippet; only the last definition survives at module level.
    """

    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # Stride-2 conv performs the downsampling itself.
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        return hidden_states
class _lowerCAmelCase(nn.Module):
    """Time-conditioned ResNet block: norm -> swish -> conv, add projected time
    embedding, norm -> swish -> dropout -> conv, then residual add (with an
    optional 1x1 shortcut conv when channel counts differ).

    NOTE(review): restored from an obfuscated original in which all five
    dataclass fields shared one name, the setup method was misnamed (Flax only
    invokes ``setup``), every submodule was assigned to a throwaway local
    instead of a ``self`` attribute, and ``__call__`` declared three
    parameters all named ``a`` (a SyntaxError).  Field names follow the
    ``self.in_channels`` / ``self.out_channels`` / ``self.dropout_prob`` /
    ``self.use_nin_shortcut`` / ``self.dtype`` references in the original
    setup body; the digit-mangled ``self.norma`` / ``self.conva`` references
    are split back into norm1/norm2 and conv1/conv2 to match the two GroupNorm
    and two Conv submodules the original setup created.
    """

    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1E-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # Projects the (already swished) time embedding into the channel dim.
        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1E-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            # 1x1 conv matching the residual's channel count.
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding='''VALID''',
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        # Broadcast the (batch, channels) embedding over height and width.
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
"""simple docstring"""
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size: int, device: str):
    """Download the BLIP demo image, resize + normalize it, and return a
    ``(1, 3, image_size, image_size)`` float tensor on ``device``.

    NOTE(review): the original def was named ``A_`` (shadowed by two later
    ``A_`` defs) with both parameters named ``__UpperCamelCase`` (a
    SyntaxError); the name and parameter names are restored from the keyword
    call ``load_demo_image(image_size=..., device='cpu')`` later in this file.
    """
    img_url = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'''
    # stream=True lets PIL read straight from the HTTP response body.
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('''RGB''')

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            # CLIP/BLIP normalization constants.
            transforms.Normalize((0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73), (0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key: str) -> str:
    """Map an original BLIP state-dict key to the transformers naming scheme.

    NOTE(review): the original def was named ``A_`` (shadowed by a later
    ``A_``), its parameter was named ``__UpperCamelCase`` while the body
    tested ``key`` (NameError), and every ``re.sub`` result was assigned to a
    throwaway local so the input was returned unchanged; each substitution now
    rebinds ``key`` so the rules compose.
    """
    if "visual_encoder" in key:
        key = re.sub('''visual_encoder*''', '''vision_model.encoder''', key)
    if "blocks" in key:
        key = re.sub(r'''blocks''', '''layers''', key)
    if "attn" in key:
        key = re.sub(r'''attn''', '''self_attn''', key)
    if "norm1" in key:
        key = re.sub(r'''norm1''', '''layer_norm1''', key)
    if "norm2" in key:
        key = re.sub(r'''norm2''', '''layer_norm2''', key)
    if "encoder.norm" in key:
        key = re.sub(r'''encoder.norm''', '''post_layernorm''', key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r'''encoder.patch_embed.proj''', '''embeddings.patch_embedding''', key)
    if "encoder.pos_embed" in key:
        key = re.sub(r'''encoder.pos_embed''', '''embeddings.position_embedding''', key)
    if "encoder.cls_token" in key:
        key = re.sub(r'''encoder.cls_token''', '''embeddings.class_embedding''', key)
    if "self_attn" in key:
        key = re.sub(r'''self_attn.proj''', '''self_attn.projection''', key)
    return key
@torch.no_grad()
def convert_blip_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    """Convert the original BLIP captioning, VQA, and ITM checkpoints into
    transformers models, sanity-check their outputs, and optionally save them
    to ``pytorch_dump_folder_path`` (plus ``_vqa`` / ``_itm`` suffixes).

    NOTE(review): reconstructed from an obfuscated original whose def was
    named ``A_`` with two parameters both named ``__UpperCamelCase`` (a
    SyntaxError) and whose body used that one name for many distinct locals.
    Parameter names follow the body's own ``config_path`` /
    ``pytorch_dump_folder_path`` references and the ``__main__`` call below;
    ``checkpoint_path`` is accepted for that call's sake but unused — the
    model URLs are hard-coded.  Local-variable bindings are inferred from the
    call structure; verify against the upstream conversion script.
    """
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=5_12, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'''

    pt_model = blip_decoder(pretrained=model_url, image_size=3_84, vit='''base''')
    pt_model = pt_model.eval()

    # Re-key the original state dict into transformers naming.
    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 3_84
    image = load_demo_image(image_size=image_size, device='''cpu''')
    tokenizer = BertTokenizer.from_pretrained('''bert-base-uncased''')
    input_ids = tokenizer(['''a picture of''']).input_ids

    # Prompted and unprompted captions must match the reference token ids.
    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [3_05_22, 10_37, 38_61, 19_97, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]

    out = hf_model.generate(image)
    assert out[0].tolist() == [3_05_22, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'''
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit='''base''')
    vqa_model.eval()
    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ['''How many dogs are in this image?''']
    question_input_ids = tokenizer(question, return_tensors='''pt''').input_ids
    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))

    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '''_vqa''')

    model_url = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'''

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit='''base''')
    itm_model.eval()
    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ['''A picture of a woman with a dog sitting in a beach''']
    question_input_ids = tokenizer(
        question, return_tensors='''pt''', padding='''max_length''', truncation=True, max_length=35,
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    # With the ITM head the model scores match/mismatch; without it, cosine sim.
    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.21_10_68_74_94_27_79_54
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.4_56_98_84_53_86_50_51_27

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + '''_itm''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # NOTE(review): the original parser never registered --checkpoint_path even
    # though the call below reads args.checkpoint_path (AttributeError at
    # runtime); it is registered here with a None default.
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the original BLIP checkpoint.''')
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    args = parser.parse_args()

    convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.