"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def lowercase ( __snake_case : Dict ):
lowercase_ : Optional[int] = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''_float_tensor''',
'''decoder.output_projection.weight''',
]
for k in ignore_keys:
state_dict.pop(__snake_case , __snake_case )
def lowercase ( __snake_case : List[str] ):
lowercase_ , lowercase_ : Dict = emb.weight.shape
lowercase_ : Dict = nn.Linear(__snake_case , __snake_case , bias=__snake_case )
lowercase_ : List[str] = emb.weight.data
return lin_layer
def lowercase ( __snake_case : Optional[Any] , __snake_case : Dict="facebook/mbart-large-en-ro" , __snake_case : str=False , __snake_case : Union[str, Any]=False ):
lowercase_ : str = torch.load(__snake_case , map_location='''cpu''' )['''model''']
remove_ignore_keys_(__snake_case )
lowercase_ : Optional[int] = state_dict['''encoder.embed_tokens.weight'''].shape[0]
lowercase_ : str = MBartConfig.from_pretrained(__snake_case , vocab_size=__snake_case )
if mbart_aa and finetuned:
lowercase_ : Optional[Any] = '''relu'''
lowercase_ : List[Any] = state_dict['''decoder.embed_tokens.weight''']
lowercase_ : int = MBartForConditionalGeneration(__snake_case )
model.model.load_state_dict(__snake_case )
if finetuned:
lowercase_ : Dict = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
__A : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
__A : List[str] = parser.parse_args()
__A : Tuple = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
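# Example invocation of the conversion script above (a minimal sketch; the
# checkpoint path and output directory are hypothetical placeholders, not files
# shipped with this script):
#
#   python convert_mbart_original_checkpoint_to_pytorch.py \
#       /path/to/fairseq/model.pt ./mbart-converted \
#       --hf_config facebook/mbart-large-cc25 --finetuned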
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class _UpperCAmelCase :
def __init__( self : Tuple , A : Any , A : Dict=13 , A : Union[str, Any]=7 , A : List[Any]=True , A : List[Any]=True , A : Tuple=False , A : Optional[Any]=True , A : Tuple=99 , A : Tuple=32 , A : Dict=5 , A : int=4 , A : List[Any]=37 , A : Optional[int]="gelu" , A : List[str]=0.1 , A : List[Any]=0.1 , A : Optional[Any]=5_12 , A : Dict=16 , A : str=2 , A : int=0.02 , A : Optional[int]=3 , A : Tuple=4 , A : List[str]=None , ) -> Union[str, Any]:
lowercase_ : Dict = parent
lowercase_ : List[str] = batch_size
lowercase_ : int = seq_length
lowercase_ : List[str] = is_training
lowercase_ : Tuple = use_input_mask
lowercase_ : List[Any] = use_token_type_ids
lowercase_ : Union[str, Any] = use_labels
lowercase_ : Optional[Any] = vocab_size
lowercase_ : str = hidden_size
lowercase_ : Any = num_hidden_layers
lowercase_ : str = num_attention_heads
lowercase_ : Optional[Any] = intermediate_size
lowercase_ : List[str] = hidden_act
lowercase_ : List[str] = hidden_dropout_prob
lowercase_ : Dict = attention_probs_dropout_prob
lowercase_ : List[Any] = max_position_embeddings
lowercase_ : Dict = type_vocab_size
lowercase_ : Union[str, Any] = type_sequence_label_size
lowercase_ : Optional[Any] = initializer_range
lowercase_ : Tuple = num_labels
lowercase_ : Union[str, Any] = num_choices
lowercase_ : Optional[int] = scope
def A ( self : str ) -> Optional[int]:
lowercase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ : List[str] = None
if self.use_input_mask:
lowercase_ : int = random_attention_mask([self.batch_size, self.seq_length] )
lowercase_ : List[Any] = None
if self.use_token_type_ids:
lowercase_ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase_ : List[str] = None
lowercase_ : str = None
lowercase_ : Optional[int] = None
if self.use_labels:
lowercase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase_ : Tuple = ids_tensor([self.batch_size] , self.num_choices )
lowercase_ : Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A ( self : Optional[Any] ) -> int:
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , )
def A ( self : List[Any] , A : Optional[Any] , A : str , A : Union[str, Any] , A : Dict , A : Optional[int] , A : str , A : Union[str, Any] ) -> Any:
lowercase_ : Optional[int] = LlamaModel(config=A )
model.to(A )
model.eval()
lowercase_ : Tuple = model(A , attention_mask=A )
lowercase_ : Optional[int] = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : str , A : Dict , A : Optional[int] , A : List[Any] , A : List[Any] , A : int , A : List[str] , A : int , A : List[Any] , A : int , ) -> Tuple:
lowercase_ : str = True
lowercase_ : str = LlamaModel(A )
model.to(A )
model.eval()
lowercase_ : str = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , )
lowercase_ : Tuple = model(
A , attention_mask=A , encoder_hidden_states=A , )
lowercase_ : Dict = model(A , attention_mask=A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : Tuple , A : Optional[Any] , A : Optional[int] , A : Union[str, Any] , A : Union[str, Any] , A : Dict , A : Optional[int] , A : Union[str, Any] , A : List[Any] , A : List[Any] , ) -> Tuple:
lowercase_ : Optional[Any] = LlamaForCausalLM(config=A )
model.to(A )
model.eval()
lowercase_ : Tuple = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A ( self : Any , A : List[str] , A : Dict , A : Dict , A : int , A : Any , A : Optional[int] , A : str , A : Dict , A : Optional[Any] , ) -> int:
lowercase_ : Any = True
lowercase_ : str = True
lowercase_ : List[str] = LlamaForCausalLM(config=A )
model.to(A )
model.eval()
# first forward pass
lowercase_ : Tuple = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , use_cache=A , )
lowercase_ : str = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowercase_ : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowercase_ : List[str] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowercase_ : Any = torch.cat([input_ids, next_tokens] , dim=-1 )
lowercase_ : Union[str, Any] = torch.cat([input_mask, next_mask] , dim=-1 )
lowercase_ : Dict = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , output_hidden_states=A , )['''hidden_states'''][0]
lowercase_ : Dict = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , past_key_values=A , output_hidden_states=A , )['''hidden_states'''][0]
# select random slice
lowercase_ : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowercase_ : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase_ : Optional[int] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A , A , atol=1e-3 ) )
def A ( self : Union[str, Any] ) -> List[Any]:
lowercase_ : Tuple = self.prepare_config_and_inputs()
(
(
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) ,
) : Tuple = config_and_inputs
lowercase_ : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( _A , _A , _A , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : Optional[int] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : List[str] = (LlamaForCausalLM,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : Tuple = (
{
"feature-extraction": LlamaModel,
"text-classification": LlamaForSequenceClassification,
"text-generation": LlamaForCausalLM,
"zero-shot": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Dict = False
SCREAMING_SNAKE_CASE_ : Dict = False
def A ( self : Dict ) -> List[Any]:
lowercase_ : Any = LlamaModelTester(self )
lowercase_ : List[str] = ConfigTester(self , config_class=A , hidden_size=37 )
def A ( self : Any ) -> Any:
self.config_tester.run_common_tests()
def A ( self : List[Any] ) -> Union[str, Any]:
lowercase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def A ( self : List[Any] ) -> int:
lowercase_ : str = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase_ : int = type
self.model_tester.create_and_check_model(*A )
def A ( self : int ) -> Optional[int]:
lowercase_ , lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : Optional[Any] = 3
lowercase_ : Dict = input_dict['''input_ids''']
lowercase_ : List[str] = input_ids.ne(1 ).to(A )
lowercase_ : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowercase_ : int = LlamaForSequenceClassification(A )
model.to(A )
model.eval()
lowercase_ : int = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def A ( self : int ) -> Optional[int]:
lowercase_ , lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : List[Any] = 3
lowercase_ : Tuple = '''single_label_classification'''
lowercase_ : str = input_dict['''input_ids''']
lowercase_ : Any = input_ids.ne(1 ).to(A )
lowercase_ : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowercase_ : Any = LlamaForSequenceClassification(A )
model.to(A )
model.eval()
lowercase_ : List[Any] = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def A ( self : Any ) -> Union[str, Any]:
lowercase_ , lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : Tuple = 3
lowercase_ : int = '''multi_label_classification'''
lowercase_ : Optional[Any] = input_dict['''input_ids''']
lowercase_ : Dict = input_ids.ne(1 ).to(A )
lowercase_ : Optional[int] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowercase_ : Optional[Any] = LlamaForSequenceClassification(A )
model.to(A )
model.eval()
lowercase_ : Dict = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''LLaMA buffers include complex numbers, which breaks this test''' )
def A ( self : Union[str, Any] ) -> Dict:
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def A ( self : int , A : int ) -> Optional[int]:
lowercase_ , lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : str = ids_tensor([1, 10] , config.vocab_size )
lowercase_ : str = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowercase_ : Optional[Any] = LlamaModel(A )
original_model.to(A )
original_model.eval()
lowercase_ : List[str] = original_model(A ).last_hidden_state
lowercase_ : int = original_model(A ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowercase_ : List[Any] = {'''type''': scaling_type, '''factor''': 10.0}
lowercase_ : int = LlamaModel(A )
scaled_model.to(A )
scaled_model.eval()
lowercase_ : Union[str, Any] = scaled_model(A ).last_hidden_state
lowercase_ : Optional[int] = scaled_model(A ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(A , A , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(A , A , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(A , A , atol=1e-5 ) )
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
@unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' )
@slow
def A ( self : List[str] ) -> List[str]:
lowercase_ : Union[str, Any] = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
lowercase_ : List[str] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-7b-hf''' , device_map='''auto''' )
lowercase_ : List[Any] = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
lowercase_ : Optional[int] = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) , A , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowercase_ : Optional[int] = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , A , atol=1e-5 , rtol=1e-5 )
@unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' )
@slow
def A ( self : Tuple ) -> str:
lowercase_ : Optional[Any] = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
lowercase_ : Union[str, Any] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-hf''' , device_map='''auto''' )
lowercase_ : Tuple = model(torch.tensor(A ) )
# Expected mean on dim = -1
lowercase_ : Optional[Any] = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) , A , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowercase_ : Union[str, Any] = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , A , atol=1e-5 , rtol=1e-5 )
@unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' )
@slow
def A ( self : List[Any] ) -> Dict:
lowercase_ : Union[str, Any] = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
lowercase_ : Optional[Any] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' , device_map='''auto''' )
lowercase_ : List[Any] = model(torch.tensor(A ) )
# Expected mean on dim = -1
lowercase_ : List[str] = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) , A , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowercase_ : Dict = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , A , atol=1e-2 , rtol=1e-2 )
@unittest.skip(
'''Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test''' )
@slow
def A ( self : Union[str, Any] ) -> Optional[Any]:
lowercase_ : List[str] = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
lowercase_ : Optional[Any] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-70b-hf''' , device_map='''auto''' )
lowercase_ : Union[str, Any] = model(torch.tensor(A ) )
lowercase_ : Any = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , A , atol=1e-2 , rtol=1e-2 )
# fmt: off
lowercase_ : Optional[Any] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , A , atol=1e-5 , rtol=1e-5 )
@unittest.skip('''Model is curently gated''' )
@slow
def A ( self : str ) -> Tuple:
lowercase_ : List[str] = '''Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'''
lowercase_ : Any = '''Simply put, the theory of relativity states that '''
lowercase_ : Optional[Any] = LlamaTokenizer.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' )
lowercase_ : Union[str, Any] = tokenizer.encode(A , return_tensors='''pt''' )
lowercase_ : List[Any] = LlamaForCausalLM.from_pretrained(
'''meta-llama/Llama-2-13b-chat-hf''' , device_map='''sequential''' , use_safetensors=A )
# greedy generation outputs
lowercase_ : List[str] = model.generate(A , max_new_tokens=64 , top_p=A , temperature=1 , do_sample=A )
lowercase_ : Union[str, Any] = tokenizer.decode(generated_ids[0] , skip_special_tokens=A )
self.assertEqual(A , A )
"""ConvNeXt V2 model configuration."""

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}


class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    """Configuration class to store the configuration of a ConvNeXt V2 model."""

    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
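# A minimal usage sketch of the config above (assumes the public `transformers`
# package, where `ConvNextV2Model` is the matching model class; the values
# passed here are just the defaults made explicit):
#
#   from transformers import ConvNextV2Config, ConvNextV2Model
#
#   config = ConvNextV2Config(depths=[3, 3, 9, 3], hidden_sizes=[96, 192, 384, 768])
#   model = ConvNextV2Model(config)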
"""Scrape Amazon India search results for a product into a pandas DataFrame."""

from itertools import zip_longest

import requests
from bs4 import BeautifulSoup
from pandas import DataFrame


def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
    (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text, "html.parser")
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span", attrs={"class": "a-price a-text-price"}
                    ).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        data_frame.loc[
            data_frame["Current Price of the product"] == "", "Current Price of the product"
        ] = " "
        data_frame.loc[data_frame["MRP of the product"] == "", "MRP of the product"] = " "
        data_frame.index += 1
    return data_frame


if __name__ == "__main__":
    product = "headphones"
    get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv")
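# Quick usage sketch (network-dependent, and Amazon's markup changes often, so
# the CSS classes targeted above can stop matching at any time; the query
# string is illustrative):
#
#   df = get_amazon_product_data("wireless mouse")
#   print(df.head())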
"""Testing suite for the TensorFlow DistilBERT model."""

from __future__ import annotations

import unittest

from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers.models.distilbert.modeling_tf_distilbert import (
        TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFDistilBertForMaskedLM,
        TFDistilBertForMultipleChoice,
        TFDistilBertForQuestionAnswering,
        TFDistilBertForSequenceClassification,
        TFDistilBertForTokenClassification,
        TFDistilBertModel,
    )


class TFDistilBertModelTester:
    def __init__(
        self,
        parent,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
"""A stack implemented with a singly linked list."""

from __future__ import annotations

from collections.abc import Iterator
from typing import Generic, TypeVar

T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None


if __name__ == "__main__":
    from doctest import testmod

    testmod()
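# A brief usage sketch of the stack above (names match this file; the values in
# comments are what each call returns):
#
#   stack: LinkedStack[int] = LinkedStack()
#   stack.push(1)
#   stack.push(2)
#   stack.peek()   # 2 (most recently pushed item)
#   str(stack)     # "2->1"
#   stack.pop()    # 2
#   len(stack)     # 1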
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
A = None
A = logging.get_logger(__name__)
A = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
A = {
"""vocab_file""": {
"""facebook/mbart-large-en-ro""": (
"""https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"""
),
"""facebook/mbart-large-cc25""": (
"""https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/mbart-large-en-ro""": """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json""",
"""facebook/mbart-large-cc25""": """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json""",
},
}
A = {
"""facebook/mbart-large-en-ro""": 1_024,
"""facebook/mbart-large-cc25""": 1_024,
}
# fmt: off
A = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""]
class a__ ( __magic_name__ ):
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = ["input_ids", "attention_mask"]
lowercase_ = MBartTokenizer
lowercase_ = []
lowercase_ = []
def __init__( self : int , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : List[str]=None , UpperCamelCase_ : Union[str, Any]="<s>" , UpperCamelCase_ : Union[str, Any]="</s>" , UpperCamelCase_ : str="</s>" , UpperCamelCase_ : Any="<s>" , UpperCamelCase_ : Any="<unk>" , UpperCamelCase_ : Any="<pad>" , UpperCamelCase_ : Any="<mask>" , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : List[str]=None , **UpperCamelCase_ : List[Any] , ):
"""simple docstring"""
__UpperCAmelCase : int = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else mask_token
super().__init__(
vocab_file=UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , src_lang=UpperCamelCase_ , tgt_lang=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , **UpperCamelCase_ , )
__UpperCAmelCase : Optional[Any] = vocab_file
__UpperCAmelCase : Dict = False if not self.vocab_file else True
__UpperCAmelCase : Union[str, Any] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens])
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
__UpperCAmelCase : List[str] = {
lang_code: self.convert_tokens_to_ids(UpperCamelCase_) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
__UpperCAmelCase : Tuple = src_lang if src_lang is not None else "en_XX"
__UpperCAmelCase : List[str] = self.convert_tokens_to_ids(self._src_lang)
__UpperCAmelCase : Any = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
@property
def a_ ( self : str):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def a_ ( self : Dict , UpperCamelCase_ : str):
"""simple docstring"""
__UpperCAmelCase : List[Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def a_ ( self : Dict , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def a_ ( self : Optional[Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None):
"""simple docstring"""
__UpperCAmelCase : int = [self.sep_token_id]
__UpperCAmelCase : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def a_ ( self : Optional[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] , UpperCamelCase_ : Optional[str] , **UpperCamelCase_ : Optional[int]):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
__UpperCAmelCase : Dict = src_lang
__UpperCAmelCase : int = self(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_)
__UpperCAmelCase : Any = self.convert_tokens_to_ids(UpperCamelCase_)
__UpperCAmelCase : str = tgt_lang_id
return inputs
def a_ ( self : Optional[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : str = "en_XX" , UpperCamelCase_ : Optional[List[str]] = None , UpperCamelCase_ : str = "ro_RO" , **UpperCamelCase_ : Union[str, Any] , ):
"""simple docstring"""
__UpperCAmelCase : Tuple = src_lang
__UpperCAmelCase : Optional[Any] = tgt_lang
return super().prepare_seqaseq_batch(UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_)
def a_ ( self : str):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang)
def a_ ( self : int):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def a_ ( self : List[Any] , UpperCamelCase_ : Optional[int]):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = self.convert_tokens_to_ids(UpperCamelCase_)
__UpperCAmelCase : Dict = []
__UpperCAmelCase : Optional[Any] = [self.eos_token_id, self.cur_lang_code]
__UpperCAmelCase : Dict = self.convert_ids_to_tokens(self.prefix_tokens)
__UpperCAmelCase : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens)
__UpperCAmelCase : Tuple = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def a_ ( self : Optional[Any] , UpperCamelCase_ : str):
"""simple docstring"""
__UpperCAmelCase : int = self.convert_tokens_to_ids(UpperCamelCase_)
__UpperCAmelCase : int = []
__UpperCAmelCase : List[str] = [self.eos_token_id, self.cur_lang_code]
__UpperCAmelCase : List[Any] = self.convert_ids_to_tokens(self.prefix_tokens)
__UpperCAmelCase : int = self.convert_ids_to_tokens(self.suffix_tokens)
__UpperCAmelCase : Optional[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def a_ ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer.")
if not os.path.isdir(UpperCamelCase_):
logger.error(F"Vocabulary path ({save_directory}) should be a directory.")
return
__UpperCAmelCase : int = os.path.join(
UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCamelCase_):
copyfile(self.vocab_file , UpperCamelCase_)
return (out_vocab_file,)
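# A short usage sketch of the tokenizer above (downloads "facebook/mbart-large-en-ro"
# from the Hub, so it needs network access; the sentence is illustrative):
#
#   from transformers import MBartTokenizerFast
#
#   tok = MBartTokenizerFast.from_pretrained(
#       "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
#   )
#   batch = tok("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
#   # Per set_src_lang_special_tokens above, input_ids end with [eos, en_XX code].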
"""simple docstring"""
def _UpperCamelCase ( UpperCamelCase ) -> int:
"""simple docstring"""
if not grid or not grid[0]:
raise TypeError("The grid does not contain the appropriate information" )
for cell_n in range(1 , len(grid[0] ) ):
grid[0][cell_n] += grid[0][cell_n - 1]
__UpperCAmelCase : int = grid[0]
for row_n in range(1 , len(UpperCamelCase ) ):
__UpperCAmelCase : int = grid[row_n]
__UpperCAmelCase : str = fill_row(UpperCamelCase , UpperCamelCase )
__UpperCAmelCase : List[str] = grid[row_n]
return grid[-1][-1]
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> list:
"""simple docstring"""
current_row[0] += row_above[0]
for cell_n in range(1 , len(UpperCamelCase ) ):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
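# Worked example (illustrative): each cell is rewritten in place to the cheapest
# cumulative cost of reaching it while moving only right or down, so
#
#   min_path_sum([
#       [1, 3, 1],
#       [1, 5, 1],
#       [4, 2, 1],
#   ])  # returns 7, via the path 1 -> 3 -> 1 -> 1 -> 1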
"""Explicit (forward) Euler method for approximating solutions of ordinary differential equations."""

from collections.abc import Callable

import numpy as np


def explicit_euler(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Approximate y(x) for y' = ode_func(x, y) with initial value y(x0) = y0."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
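# Worked example: for y' = y with y(0) = 1 the exact solution is e**x, and the
# explicit Euler iterate after n steps of size h is (1 + h)**n, so:
#
#   y = explicit_euler(lambda x, y: y, y0=1.0, x0=0.0, step_size=0.01, x_end=1.0)
#   y[-1]  # ~2.7048, approaching e ~ 2.71828 as step_size shrinks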
"""Convert a RemBERT TensorFlow checkpoint to a PyTorch model."""

import argparse

import torch

from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--rembert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained RemBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
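# Example invocation of the conversion script above (the paths below are
# hypothetical placeholders):
#
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/rembert/model.ckpt \
#       --rembert_config_file /path/to/rembert_config.json \
#       --pytorch_dump_path ./rembert_pytorch.bin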
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]


if TYPE_CHECKING:
    from .configuration_timm_backbone import TimmBackboneConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timm_backbone import TimmBackbone

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class __A :
'''simple docstring'''
def __init__( self , _snake_case , _snake_case=14 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=False , _snake_case=True , _snake_case=99 , _snake_case=32 , _snake_case=4 , _snake_case=4 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=512 , _snake_case=0.02 , ):
_lowerCAmelCase : Any = parent
_lowerCAmelCase : Tuple = batch_size
_lowerCAmelCase : int = seq_length
_lowerCAmelCase : Any = is_training
_lowerCAmelCase : Dict = use_input_mask
_lowerCAmelCase : int = use_token_type_ids
_lowerCAmelCase : Union[str, Any] = use_labels
_lowerCAmelCase : Union[str, Any] = vocab_size
_lowerCAmelCase : Optional[Any] = hidden_size
_lowerCAmelCase : Optional[Any] = rotary_dim
_lowerCAmelCase : Optional[int] = num_hidden_layers
_lowerCAmelCase : Tuple = num_attention_heads
_lowerCAmelCase : Any = intermediate_size
_lowerCAmelCase : Any = hidden_act
_lowerCAmelCase : List[Any] = hidden_dropout_prob
_lowerCAmelCase : str = attention_probs_dropout_prob
_lowerCAmelCase : str = max_position_embeddings
_lowerCAmelCase : Tuple = initializer_range
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : List[Any] = vocab_size - 1
_lowerCAmelCase : List[str] = vocab_size - 1
_lowerCAmelCase : Dict = vocab_size - 1
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : List[Any] = None
if self.use_input_mask:
_lowerCAmelCase : int = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase : Tuple = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=UpperCamelCase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : List[Any] = self.prepare_config_and_inputs()
_lowerCAmelCase : Optional[Any] = config_and_inputs
_lowerCAmelCase : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case , _snake_case , _snake_case ):
_lowerCAmelCase : Optional[Any] = 20
_lowerCAmelCase : List[Any] = model_class_name(UpperCamelCase_ )
_lowerCAmelCase : List[str] = model.init_cache(input_ids.shape[0] , UpperCamelCase_ )
_lowerCAmelCase : int = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="i4" )
_lowerCAmelCase : Union[str, Any] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
_lowerCAmelCase : Union[str, Any] = model(
input_ids[:, :-1] , attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , position_ids=UpperCamelCase_ , )
_lowerCAmelCase : List[str] = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4" )
_lowerCAmelCase : int = model(
input_ids[:, -1:] , attention_mask=UpperCamelCase_ , past_key_values=outputs_cache.past_key_values , position_ids=UpperCamelCase_ , )
_lowerCAmelCase : Any = model(UpperCamelCase_ )
_lowerCAmelCase : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case , _snake_case , _snake_case ):
_lowerCAmelCase : Union[str, Any] = 20
_lowerCAmelCase : int = model_class_name(UpperCamelCase_ )
_lowerCAmelCase : int = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
_lowerCAmelCase : Tuple = model.init_cache(input_ids.shape[0] , UpperCamelCase_ )
_lowerCAmelCase : Optional[Any] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
_lowerCAmelCase : Optional[Any] = model(
input_ids[:, :-1] , attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , position_ids=UpperCamelCase_ , )
_lowerCAmelCase : List[Any] = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4" )
_lowerCAmelCase : Union[str, Any] = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=UpperCamelCase_ , position_ids=UpperCamelCase_ , )
_lowerCAmelCase : Dict = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )
_lowerCAmelCase : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
@require_flax
class __A ( A__ ,A__ ,unittest.TestCase ):
'''simple docstring'''
a_ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
a_ = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : Optional[int] = FlaxGPTJModelTester(self )
def SCREAMING_SNAKE_CASE__ ( self ):
for model_class_name in self.all_model_classes:
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def SCREAMING_SNAKE_CASE__ ( self ):
for model_class_name in self.all_model_classes:
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
@tooslow
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : List[str] = GPTaTokenizer.from_pretrained("gpt2" , pad_token="<|endoftext|>" , padding_side="left" )
_lowerCAmelCase : str = tokenizer(["Hello this is a long string", "Hey"] , return_tensors="np" , padding=UpperCamelCase_ , truncation=UpperCamelCase_ )
_lowerCAmelCase : Tuple = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B" )
_lowerCAmelCase : Dict = False
_lowerCAmelCase : List[Any] = model.config.eos_token_id
_lowerCAmelCase : str = jax.jit(model.generate )
_lowerCAmelCase : List[Any] = jit_generate(
inputs["input_ids"] , attention_mask=inputs["attention_mask"] , pad_token_id=tokenizer.pad_token_id ).sequences
_lowerCAmelCase : Any = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
_lowerCAmelCase : Optional[Any] = [
'''Hello this is a long string of text.\n\nI\'m trying to get the text of the''',
'''Hey, I\'m a little late to the party. I\'m going to''',
]
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)
    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 424 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3)) | 189 | 0 |
import math
def is_prime(number: int) -> bool:
    """Checks whether `number` is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False

    # All prime numbers greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
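
# Why checking 6k +/- 1 suffices: every integer is 6k + r with r in
# {0, 1, 2, 3, 4, 5}, and the cases r in {0, 2, 3, 4} are divisible by 2
# or 3, so any prime greater than 3 must have r == 1 or r == 5. A quick
# sanity check of the helper (illustrative, not part of the original module):
#
#   >>> [n for n in range(2, 30) if is_prime(n)]
#   [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
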
def solution(nth: int = 10001) -> int:
    """
    Returns the n-th prime number.

    >>> solution(6)
    13
    >>> solution(1)
    2
    >>> solution(3)
    5
    """
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
            num += 1
        else:
            num += 1
    return primes[len(primes) - 1]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 601 |
import enum
import os
from hashlib import sha256
from typing import Optional

from .. import config
from .logging import get_logger

logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    """`Enum` that specifies which verification checks to run."""

    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksum verification of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum doesn't match the expected checksum."""


def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)


class SplitsVerificationException(Exception):
    """Exceptions during split verification."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file are missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The split sizes don't match the expected split sizes."""


def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            # read in 1 MiB chunks to keep memory bounded
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size):
    """Check if `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
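
# Minimal usage sketch (illustrative values; `expected` would normally come
# from the dataset's recorded metadata):
#
#   recorded = {"https://host/data.txt": get_size_checksum_dict("./data.txt")}
#   expected = {"https://host/data.txt": {"num_bytes": 1024, "checksum": "..."}}
#   verify_checksums(expected, recorded, verification_name="dataset source files")
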
| 601 | 1 |
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
_KWARGS_DESCRIPTION = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Ter(datasets.Metric):
    def _info(self):
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""http://www.cs.umd.edu/~snover/tercom/""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#ter"""] , reference_urls=[
"""https://github.com/jhclark/tercom""",
] , )
    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 499 |
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import Array2D, ClassLabel, Features, Image, Value
from datasets.features.features import Array2DExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class TypedSequenceTest(TestCase):
    def test_no_type(self):
        arr = pa.array(TypedSequence([1, 2, 3]))
        self.assertEqual(arr.type, pa.int64())

    def test_array_type_forbidden(self):
        with self.assertRaises(ValueError):
            _ = pa.array(TypedSequence([1, 2, 3]), type=pa.int64())

    def test_try_type_and_type_forbidden(self):
        with self.assertRaises(ValueError):
            _ = pa.array(TypedSequence([1, 2, 3], try_type=Value("bool"), type=Value("int64")))

    def test_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            _ = pa.array(TypedSequence(["foo", "bar"], type=Value("int64")))

    def test_try_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_try_incompatible_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int64")))
        self.assertEqual(arr.type, pa.string())

    def test_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_incompatible_extension_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            _ = pa.array(TypedSequence(["foo", "bar"], type=Array2D((1, 3), "int64")))

    def test_try_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_try_incompatible_extension_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, pa.string())

    @require_pil
    def test_exhaustive_cast(self):
        import PIL.Image

        pil_image = PIL.Image.fromarray(np.arange(10, dtype=np.uint8).reshape(2, 5))
        with patch(
            "datasets.arrow_writer.cast_to_python_objects", side_effect=cast_to_python_objects
        ) as mock_cast_to_python_objects:
            _ = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image], type=Image()))
            args, kwargs = mock_cast_to_python_objects.call_args_list[-1]
            self.assertIn("optimize_list_casting", kwargs)
            self.assertFalse(kwargs["optimize_list_casting"])
def _check_output(output, expected_num_chunks: int):
    stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    assert len(pa_table.to_batches()) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
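
# Note: `ArrowWriter` flushes a record batch every `writer_batch_size` examples,
# so with writer_batch_size=1 each example becomes its own chunk -- hence the
# `expected_num_chunks=num_examples if writer_batch_size == 1 else 1` pattern
# used throughout the tests below.
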
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


def test_write_with_features():
    output = pa.BufferOutputStream()
    features = Features({"labels": ClassLabel(names=["neg", "pos"])})
    with ArrowWriter(stream=output, features=features) as writer:
        writer.write({"labels": 0})
        writer.write({"labels": 1})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    stream = pa.BufferReader(output.getvalue())
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    schema = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    assert features == Features.from_arrow_schema(schema)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
def test_key_datatype(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True
    ) as writer:
        with pytest.raises(InvalidKeyError):
            writer.write({"col_1": "foo", "col_2": 1}, key=[1, 2])
        num_examples, num_bytes = writer.finalize()


@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_duplicate_keys(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True
    ) as writer:
        with pytest.raises(DuplicatedKeysError):
            writer.write({"col_1": "foo", "col_2": 1}, key=10)
            writer.write({"col_1": "bar", "col_2": 2}, key=10)
            num_examples, num_bytes = writer.finalize()


@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_write_with_keys(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True
    ) as writer:
        writer.write({"col_1": "foo", "col_2": 1}, key=1)
        writer.write({"col_1": "bar", "col_2": 2}, key=2)
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_batch(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
        writer.write_batch({"col_1": [], "col_2": []})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_table(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_row(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]}))
        writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


def test_write_file():
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
        output = os.path.join(tmp_dir, "test.arrow")
        with ArrowWriter(path=output, schema=pa.schema(fields)) as writer:
            writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
            num_examples, num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
        _check_output(output, 1)
def get_base_dtype(arr_type):
    if pa.types.is_list(arr_type):
        return get_base_dtype(arr_type.value_type)
    else:
        return arr_type


def change_first_primitive_element_in_list(lst, value):
    if isinstance(lst[0], list):
        change_first_primitive_element_in_list(lst[0], value)
    else:
        lst[0] = value


@pytest.mark.parametrize("optimized_int_type, expected_dtype", [(None, pa.int64()), (Value("int32"), pa.int32())])
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_int_type_for_typed_sequence(sequence, optimized_int_type, expected_dtype):
    arr = pa.array(TypedSequence(sequence, optimized_int_type=optimized_int_type))
    assert get_base_dtype(arr.type) == expected_dtype


@pytest.mark.parametrize(
    "col, expected_dtype",
    [
        ("attention_mask", pa.int8()),
        ("special_tokens_mask", pa.int8()),
        ("token_type_ids", pa.int8()),
        ("input_ids", pa.int32()),
        ("other", pa.int64()),
    ],
)
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_typed_sequence(sequence, col, expected_dtype):
    # in range
    arr = pa.array(OptimizedTypedSequence(sequence, col=col))
    assert get_base_dtype(arr.type) == expected_dtype

    # not in range
    if col != "other":
        # avoids errors due to in-place modifications
        sequence = copy.deepcopy(sequence)
        value = np.iinfo(expected_dtype.to_pandas_dtype()).max + 1
        change_first_primitive_element_in_list(sequence, value)
        arr = pa.array(OptimizedTypedSequence(sequence, col=col))
        assert get_base_dtype(arr.type) == pa.int64()
@pytest.mark.parametrize("raise_exception", [False, True])
def test_arrow_writer_closes_stream(raise_exception, tmp_path):
    path = str(tmp_path / "dataset-train.arrow")
    try:
        with ArrowWriter(path=path) as writer:
            if raise_exception:
                raise pa.lib.ArrowInvalid()
            else:
                writer.stream.close()
    except pa.lib.ArrowInvalid:
        pass
    finally:
        assert writer.stream.closed


def test_arrow_writer_with_filesystem(mockfs):
    path = "mock://dataset-train.arrow"
    with ArrowWriter(path=path, storage_options=mockfs.storage_options) as writer:
        assert isinstance(writer._fs, type(mockfs))
        assert writer._fs.storage_options == mockfs.storage_options
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert mockfs.exists(path)


def test_parquet_writer_write():
    output = pa.BufferOutputStream()
    with ParquetWriter(stream=output) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}


@require_pil
@pytest.mark.parametrize("embed_local_files", [False, True])
def test_writer_embed_local_files(tmp_path, embed_local_files):
    import PIL.Image

    image_path = str(tmp_path / "test_image_rgb.jpg")
    PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uint8)).save(image_path, format="png")
    output = pa.BufferOutputStream()
    with ParquetWriter(
        stream=output, features=Features({"image": Image()}), embed_local_files=embed_local_files
    ) as writer:
        writer.write({"image": image_path})
        writer.finalize()
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    out = pa_table.to_pydict()
    if embed_local_files:
        assert isinstance(out["image"][0]["path"], str)
        with open(image_path, "rb") as f:
            assert out["image"][0]["bytes"] == f.read()
    else:
        assert out["image"][0]["path"] == image_path
        assert out["image"][0]["bytes"] is None


def test_always_nullable():
    non_nullable_schema = pa.schema([pa.field("col_1", pa.string(), nullable=False)])

    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output) as writer:
        writer._build_writer(inferred_schema=non_nullable_schema)

    assert writer._schema == pa.schema([pa.field("col_1", pa.string())])
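
# Minimal ArrowWriter usage sketch mirroring the tests above (illustrative):
#
#   output = pa.BufferOutputStream()
#   with ArrowWriter(stream=output) as writer:
#       writer.write({"col_1": "foo", "col_2": 1})
#       num_examples, num_bytes = writer.finalize()
#   table = pa.ipc.open_stream(pa.BufferReader(output.getvalue())).read_all()
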
| 499 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_table_transformer''': [
'''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TableTransformerConfig''',
'''TableTransformerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
'''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TableTransformerForObjectDetection''',
'''TableTransformerModel''',
'''TableTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
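
# Access-path sketch for the lazy module above (illustrative): nothing from
# `modeling_table_transformer` is imported until an attribute is touched, e.g.
#
#   from transformers.models import table_transformer
#   table_transformer.TableTransformerConfig   # resolved lazily by _LazyModule
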
| 21 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
| 21 | 1 |
def abbr(a: str, b: str) -> bool:
    """
    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
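
# Reading the DP table: dp[i][j] is True when the first i characters of `a`
# can be turned into the first j characters of `b`. From a True state we can
# upper-case a[i] to match b[j] (advance both indices) or delete a lowercase
# a[i] (advance i only); uppercase characters can never be deleted, which is
# why abbr("dBcd", "ABC") is False.
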
if __name__ == "__main__":
import doctest
doctest.testmod()
| 618 |
def greatest_common_divisor(x: int, y: int) -> int:
    """Computes gcd(x, y) with Euclid's algorithm."""
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    """Computes the least common multiple via the gcd."""
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """
    Returns the smallest positive number that is evenly divisible
    (with no remainder) by all of the numbers from 1 to n.
    """
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
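
# Sanity check (the well-known Project Euler 5 answer): the smallest number
# evenly divisible by every integer from 1 to 20 is 232792560, so
# solution() == 232792560 and solution(10) == 2520.
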
if __name__ == "__main__":
print(f'''{solution() = }''')
| 618 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Calculates the euclidean distance between two points."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    """
    For each vector in `value_array`, finds the nearest vector in `dataset`
    (by euclidean distance) and returns `[vector, distance]` pairs.
    """
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            # keep the closest vector seen so far
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Calculates the cosine similarity between two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
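
# Usage sketch (illustrative data): for each query vector the function returns
# the closest dataset row together with its euclidean distance.
#
#   dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
#   value_array = np.array([[0.0, 1.0]])
#   similarity_search(dataset, value_array)   # -> [[[0.0, 0.0], 1.0]]
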
if __name__ == "__main__":
import doctest
doctest.testmod()
| 711 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
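
    # Note: unlike GPT-2-style tokenizers, BioGPT *prepends* its separator
    # token (id 2 in the pretrained vocab) rather than appending it, which is
    # exactly what the two assertions above check for single sequences and pairs.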
| 223 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name, base_model=False):
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")

    if "expand_1x1" in name:
        name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
    if "conv_3x3" in name:
        name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
    if "reduce_1x1" in name:
        name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")

    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name
def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict
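
# Illustration of the qkv split above (hypothetical shapes): a fused in-proj
# weight of shape (3 * dim, dim) is sliced row-wise into query = val[:dim],
# key = val[dim : 2 * dim] and value = val[-dim:], so e.g. a (432, 144)
# matrix would yield three (144, 144) matrices when dim == 144.
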
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak model's weights to our MobileViT structure.
    """
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(checkpoint, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
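
# Example invocation (paths are placeholders, and the script name assumes it
# is saved as convert_mlcvnets_to_pytorch.py):
#
#   python convert_mlcvnets_to_pytorch.py --mobilevit_name mobilevit_s \
#       --checkpoint_path ./mobilevit_s.pt --pytorch_dump_folder_path ./mobilevit-small
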
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--mobilevit_name",
default="mobilevit_s",
type=str,
help=(
"Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"
" 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."
),
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 109 |
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    """Formats a user-agent string with basic info about a request."""
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
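# Usage sketch (values vary by environment): extra fields are appended as "key/value",
# e.g. http_user_agent({"pipeline_class": "StableDiffusionPipeline"}) produces roughly
# "diffusers/<version>; python/<version>; session_id/<hex>; ...; pipeline_class/StableDiffusionPipeline"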
def get_full_repo_name(model_id: str, token: Optional[str] = None, organization: Optional[str] = None) -> str:
    """Prepends the namespace (user or organization) to a bare model id."""
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
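# Usage sketch (assumes a token is cached via `huggingface-cli login`):
# get_full_repo_name("my-model")                      # -> "<username>/my-model"
# get_full_repo_name("my-model", organization="org")  # -> "org/my-model"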
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )

    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en", license="apache-2.0", library_name="diffusers", tags=[], datasets=args.dataset_name, metrics=[]
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None) -> Optional[str]:
    """Extracts the commit hash from a resolved filename toward a cache file."""
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
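# Example (sketch): a resolved cache path such as
#   "~/.cache/huggingface/diffusers/models--org--name/snapshots/<40-hex-sha>/model.bin"
# yields "<40-hex-sha>"; paths without a "snapshots/<hash>/" segment return None.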
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
_UpperCamelCase = os.path.join(DIFFUSERS_CACHE, """version_diffusers_cache.txt""")
if not os.path.isfile(cache_version_file):
_UpperCamelCase = 0
else:
with open(cache_version_file) as f:
try:
_UpperCamelCase = int(f.read())
except ValueError:
_UpperCamelCase = 0
if cache_version < 1:
_UpperCamelCase = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
"""The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your """
"""existing cached models. This is a one-time operation, you can interrupt it or run it """
"""later by calling `diffusers.utils.hub_utils.move_cache()`."""
)
try:
move_cache()
except Exception as e:
_UpperCamelCase = """\n""".join(traceback.format_tb(e.__traceback__))
logger.error(
F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
"""file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole """
"""message and we will do our best to help."""
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, """w""") as f:
f.write("""1""")
except Exception:
logger.warning(
F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
"""the directory exists and can be written to."""
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)

    return weights_name
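# Examples (sketch):
# _add_variant("diffusion_pytorch_model.bin", "fp16")  # -> "diffusion_pytorch_model.fp16.bin"
# _add_variant("diffusion_pytorch_model.bin")          # -> "diffusion_pytorch_model.bin"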
def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path, filename=_add_variant(weights_name, revision), cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path, filename=weights_name, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash,
            )
            return model_file
        except RepositoryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
                "login`."
            )
        except RevisionNotFoundError:
            raise EnvironmentError(
                f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
                "this model name. Check the model page at "
                f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
            )
        except EntryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}."
            )
        except HTTPError as err:
            raise EnvironmentError(
                f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}"
            )
        except ValueError:
            raise EnvironmentError(
                f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
                f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
                f" directory containing a file named {weights_name} or"
                " \nCheckout your internet connection or see how to run the library in"
                " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'."
            )
        except EnvironmentError:
            raise EnvironmentError(
                f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
                "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
                f"containing a file named {weights_name}"
            )
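# Call sketch (hypothetical repo id; mirrors how the loaders in this module invoke it):
# model_file = _get_model_file(
#     "runwayml/stable-diffusion-v1-5", weights_name=WEIGHTS_NAME, subfolder="unet",
#     cache_dir=None, force_download=False, proxies=None, resume_download=False,
#     local_files_only=False, use_auth_token=None, user_agent=http_user_agent(), revision=None,
# )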
| 341 | 0 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
'''simple docstring'''
def __init__( self : List[Any] , __a : List[Any] , __a : Optional[int]=13 , __a : Tuple=7 , __a : Any=True , __a : Dict=True , __a : str=True , __a : Union[str, Any]=True , __a : Dict=99 , __a : List[str]=32 , __a : int=5 , __a : Union[str, Any]=4 , __a : Tuple=37 , __a : List[str]="gelu" , __a : List[Any]=0.1 , __a : Optional[int]=0.1 , __a : Optional[Any]=512 , __a : Tuple=16 , __a : List[Any]=2 , __a : Optional[int]=0.02 , __a : Dict=4 , ) -> int:
"""simple docstring"""
__lowercase : List[Any] = parent
__lowercase : Optional[int] = batch_size
__lowercase : Union[str, Any] = seq_length
__lowercase : str = is_training
__lowercase : List[str] = use_attention_mask
__lowercase : Any = use_token_type_ids
__lowercase : Optional[int] = use_labels
__lowercase : Union[str, Any] = vocab_size
__lowercase : str = hidden_size
__lowercase : List[Any] = num_hidden_layers
__lowercase : Optional[int] = num_attention_heads
__lowercase : List[str] = intermediate_size
__lowercase : Any = hidden_act
__lowercase : Any = hidden_dropout_prob
__lowercase : Tuple = attention_probs_dropout_prob
__lowercase : Union[str, Any] = max_position_embeddings
__lowercase : List[str] = type_vocab_size
__lowercase : List[str] = type_sequence_label_size
__lowercase : Optional[int] = initializer_range
__lowercase : Optional[int] = num_choices
def lowerCAmelCase ( self : Any ) -> int:
"""simple docstring"""
__lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase : List[str] = None
if self.use_attention_mask:
__lowercase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase : Dict = None
if self.use_token_type_ids:
__lowercase : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase : Optional[int] = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class lowerCAmelCase ( __a , unittest.TestCase ):
'''simple docstring'''
_A : Optional[int] = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
__lowercase : Tuple = FlaxAlbertModelTester(self )
@slow
def lowerCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
for model_class_name in self.all_model_classes:
__lowercase : str = model_class_name.from_pretrained("""albert-base-v2""" )
__lowercase : Dict = model(np.ones((1, 1) ) )
self.assertIsNotNone(__a )
@require_flax
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
__lowercase : Dict = FlaxAlbertModel.from_pretrained("""albert-base-v2""" )
__lowercase : Any = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
__lowercase : Optional[int] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
__lowercase : int = model(__a , attention_mask=__a )[0]
__lowercase : Dict = (1, 11, 768)
self.assertEqual(output.shape , __a )
__lowercase : List[str] = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __a , atol=1E-4 ) )
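# Quick usage sketch for the checkpoint exercised above (requires network access; not
# part of the original test file):
# model = FlaxAlbertModel.from_pretrained("albert-base-v2")
# hidden = model(np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]]))[0]
# hidden.shape  # -> (1, 11, 768)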
| 715 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
'''simple docstring'''
def lowerCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__a , """tf_padding""" ) )
self.parent.assertTrue(hasattr(__a , """depth_multiplier""" ) )
class MobileNetVaModelTester:
'''simple docstring'''
def __init__( self : Union[str, Any] , __a : Tuple , __a : str=13 , __a : Dict=3 , __a : List[Any]=32 , __a : Any=0.25 , __a : Any=8 , __a : Optional[int]=8 , __a : Optional[int]=6 , __a : Dict=32 , __a : Tuple=True , __a : List[Any]=True , __a : Optional[int]=True , __a : Tuple="relu6" , __a : Optional[Any]=1280 , __a : str=0.1 , __a : str=0.02 , __a : Optional[Any]=True , __a : Tuple=True , __a : Dict=10 , __a : Optional[Any]=None , ) -> Any:
"""simple docstring"""
__lowercase : List[str] = parent
__lowercase : Tuple = batch_size
__lowercase : Dict = num_channels
__lowercase : Optional[int] = image_size
__lowercase : int = depth_multiplier
__lowercase : str = depth_divisible_by
__lowercase : int = min_depth
__lowercase : Tuple = expand_ratio
__lowercase : Optional[int] = tf_padding
__lowercase : Dict = output_stride
__lowercase : Dict = first_layer_is_expansion
__lowercase : Optional[Any] = finegrained_output
__lowercase : str = hidden_act
__lowercase : Union[str, Any] = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
__lowercase : Optional[int] = classifier_dropout_prob
__lowercase : int = use_labels
__lowercase : Optional[int] = is_training
__lowercase : Dict = num_labels
__lowercase : Tuple = initializer_range
__lowercase : Optional[Any] = scope
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase : List[Any] = None
__lowercase : Optional[Any] = None
if self.use_labels:
__lowercase : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
__lowercase : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__lowercase : List[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowerCAmelCase ( self : Tuple , __a : Dict , __a : Tuple , __a : Optional[int] , __a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[int] = MobileNetVaModel(config=__a )
model.to(__a )
model.eval()
__lowercase : Tuple = model(__a )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def lowerCAmelCase ( self : List[str] , __a : Optional[int] , __a : List[str] , __a : str , __a : Optional[int] ) -> Tuple:
"""simple docstring"""
__lowercase : List[Any] = self.num_labels
__lowercase : Dict = MobileNetVaForImageClassification(__a )
model.to(__a )
model.eval()
__lowercase : Dict = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : int , __a : List[str] , __a : Tuple , __a : Any , __a : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : int = self.num_labels
__lowercase : List[Any] = MobileNetVaForSemanticSegmentation(__a )
model.to(__a )
model.eval()
__lowercase : Dict = model(__a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__lowercase : str = model(__a , labels=__a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : Tuple = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
_A : Optional[Any] = (
{
'''feature-extraction''': MobileNetVaModel,
'''image-classification''': MobileNetVaForImageClassification,
'''image-segmentation''': MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_A : Tuple = False
_A : List[str] = False
_A : List[str] = False
_A : Optional[int] = False
def lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = MobileNetVaModelTester(self )
__lowercase : int = MobileNetVaConfigTester(self , config_class=__a , has_text_modality=__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV2 does not use inputs_embeds""" )
def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV2 does not support input and output embeddings""" )
def lowerCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV2 does not output attentions""" )
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
pass
def lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
__lowercase , __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : List[Any] = model_class(__a )
__lowercase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase : int = [*signature.parameters.keys()]
__lowercase : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
def check_hidden_states_output(__a : List[Any] , __a : Tuple , __a : List[str] ):
__lowercase : Optional[Any] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : List[Any] = model(**self._prepare_for_class(__a , __a ) )
__lowercase : Tuple = outputs.hidden_states
__lowercase : str = 16
self.assertEqual(len(__a ) , __a )
__lowercase , __lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : Any = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase : Union[str, Any] = True
check_hidden_states_output(__a , __a , __a )
def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__a )
@slow
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase : Optional[int] = MobileNetVaModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def snake_case_ ( ):
__lowercase : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v2_1.0_224""" ) if is_vision_available() else None
)
@slow
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
__lowercase : Tuple = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v2_1.0_224""" ).to(__a )
__lowercase : str = self.default_image_processor
__lowercase : Tuple = prepare_img()
__lowercase : Tuple = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
__lowercase : str = model(**__a )
# verify the logits
__lowercase : Union[str, Any] = torch.Size((1, 1001) )
self.assertEqual(outputs.logits.shape , __a )
__lowercase : str = torch.tensor([0.2445, -1.1993, 0.1905] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1E-4 ) )
@slow
def lowerCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase : int = MobileNetVaForSemanticSegmentation.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
__lowercase : Dict = model.to(__a )
__lowercase : Tuple = MobileNetVaImageProcessor.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
__lowercase : List[str] = prepare_img()
__lowercase : Optional[int] = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
__lowercase : Union[str, Any] = model(**__a )
__lowercase : Any = outputs.logits
# verify the logits
__lowercase : Dict = torch.Size((1, 21, 65, 65) )
self.assertEqual(logits.shape , __a )
__lowercase : str = torch.tensor(
[
[[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
[[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
[[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
] , device=__a , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __a , atol=1E-4 ) ) | 649 | 0 |
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

logger = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384, in_channels=3, patch_size=4, embed_dim=128, depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32], window_size=12, drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
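# Usage sketch (illustrative, not part of the original file): the default configuration
# falls back to a Swin backbone and a DETR decoder.
# config = MaskFormerConfig()
# config.backbone_config.model_type  # -> "swin"
# config.decoder_config.model_type   # -> "detr"
# restored = MaskFormerConfig.from_dict(config.to_dict())  # round-trips via to_dict()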
| 328 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}
class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self) -> dict:
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
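# Export sketch (illustrative): this OnnxConfig drives ONNX export, exposing pixel_values
# and pixel_mask as dynamic-shaped inputs, validating with atol=1e-5, defaulting to
# opset 12, and requiring torch >= 1.11, e.g.:
#   python -m transformers.onnx --model=microsoft/conditional-detr-resnet-50 onnx/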
| 474 | 0 |
"""simple docstring"""
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params

logger = getLogger(__name__)

DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Save model.generate results to <out_file>, and return how long it took."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def run_generate(verbose=True):
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)

    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")

    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")

    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )

    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)

    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info

    if verbose:
        print(scores)

    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))

    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True) | 101 |
"""simple docstring"""
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Returns the prime numbers below max_number."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False

    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    """Returns the count of hybrid-integers p**q * q**p no greater than base**degree."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count
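# Worked example (sketch): a hybrid-integer has the form p**q * q**p for distinct primes
# p, q; e.g. p=2, q=3 gives 2**3 * 3**2 = 72. Taking log2 of p**q * q**p <= base**degree
# yields the q*log2(p) + p*log2(q) <= degree*log2(base) test used in the loop above.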
if __name__ == "__main__":
print(F'{solution() = }') | 101 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = """
Human: <<task>>
Assistant: """

DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}


def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """
    Downloads and caches the prompt from a repo and returns it contents (if necessary).
    """
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
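# Usage sketch: a literal prompt (it contains whitespace) is returned unchanged, while a
# bare repo id is resolved to a cached template file.
# download_prompt("Translate this into German.", agent_name="MyAgent")  # -> same string
# download_prompt(None, agent_name="MyAgent", mode="chat")  # -> chat template from the default repo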
| 403 | class matrix:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
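# Demo (a sketch, not part of the original file): 1s connected in any of the 8
# directions form one island; this grid contains five.
if __name__ == "__main__":
    grid = [
        [1, 1, 0, 0, 0],
        [0, 1, 0, 0, 1],
        [1, 0, 0, 1, 1],
        [0, 0, 0, 0, 0],
        [1, 0, 1, 0, 1],
    ]
    print(matrix(5, 5, grid).count_islands())  # -> 5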
| 403 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None
__A = namedtuple("""CoinsDistribResult""", """moves excess""")
def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0

        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0

        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves + right_distrib_moves + abs(coins_to_left) + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
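# Worked example (sketch): a root holding 3 coins with two empty leaves needs two moves,
# one coin passed down each side.
# distribute_coins(TreeNode(3, TreeNode(0), TreeNode(0)))  # -> 2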
if __name__ == "__main__":
import doctest
doctest.testmod()
| 560 |
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
__A = {"""UserAgent""": UserAgent().random}
def __A (_SCREAMING_SNAKE_CASE ) ->dict:
"""simple docstring"""
lowerCAmelCase__ :Dict = script.contents[0]
lowerCAmelCase__ :int = json.loads(data[data.find('{"config"' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class _lowerCAmelCase :
"""simple docstring"""
    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """Return a dict of user information."""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])
def __repr__( self ):
'''simple docstring'''
return F"{self.__class__.__name__}('{self.username}')"
def __str__( self ):
'''simple docstring'''
return F"{self.fullname} ({self.username}) is {self.biography}"
    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def __A (_SCREAMING_SNAKE_CASE = "github" ) ->None:
"""simple docstring"""
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
lowerCAmelCase__ :str = InstagramUser(_SCREAMING_SNAKE_CASE )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , _SCREAMING_SNAKE_CASE )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 12_0000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
__A = InstagramUser("""github""")
print(instagram_user)
print(F'''{instagram_user.number_of_posts = }''')
print(F'''{instagram_user.number_of_followers = }''')
print(F'''{instagram_user.number_of_followings = }''')
print(F'''{instagram_user.email = }''')
print(F'''{instagram_user.website = }''')
print(F'''{instagram_user.profile_picture_url = }''')
print(F'''{instagram_user.is_verified = }''')
print(F'''{instagram_user.is_private = }''')
| 560 | 1 |
def palindromic_string(input_string: str) -> str:
    """
    Manacher's algorithm: finds the longest palindromic substring in linear time.
    """
    max_length = 0

    # if input_string is "aba" than new_input_string become "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this string is ending after the previously explored end (that is r) ?
        # if yes the update the new r to the last index of this
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string
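# Examples (sketch):
# palindromic_string("abbbaba")  # -> "abbba"
# palindromic_string("ababa")    # -> "ababa"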
if __name__ == "__main__":
import doctest
doctest.testmod()
| 408 |
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)
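# Examples (sketch; the input list must already be sorted):
# binary_search([0, 1, 2, 8, 13, 17, 19, 32, 42], 3)   # -> False
# binary_search([0, 1, 2, 8, 13, 17, 19, 32, 42], 13)  # -> True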
if __name__ == "__main__":
snake_case__ : Tuple = input('Enter numbers separated by comma:\n').strip()
snake_case__ : str = [int(item.strip()) for item in user_input.split(',')]
snake_case__ : Union[str, Any] = int(input('Enter the number to be found in the list:\n').strip())
snake_case__ : str = '' if binary_search(sequence, target) else 'not '
print(F"""{target} was {not_str}found in {sequence}""")
| 408 | 1 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Lower-upper (LU) decomposition: factors a square matrix into L @ U."""
    # Ensure that table is a square array
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total

    return lower, upper
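# Worked example (sketch): L is unit lower triangular, U is upper triangular, and
# lower @ upper reproduces the input up to floating-point error.
# lower, upper = lower_upper_decomposition(np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]]))
# assert np.allclose(lower @ upper, [[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])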
if __name__ == "__main__":
import doctest
doctest.testmod()
| 427 |
'''simple docstring'''
from __future__ import annotations
def print_distance(distance: list[float], src):
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int):
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance
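# Example (sketch): three vertices with edges 0->1 (w=2), 1->2 (w=3), 0->2 (w=10).
# edges = [
#     {"src": 0, "dst": 1, "weight": 2},
#     {"src": 1, "dst": 2, "weight": 3},
#     {"src": 0, "dst": 2, "weight": 10},
# ]
# bellman_ford(edges, 3, 3, 0)  # -> [0.0, 2.0, 5.0]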
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowercase = int(input("""Enter number of vertices: """).strip())
_lowercase = int(input("""Enter number of edges: """).strip())
_lowercase = [{} for _ in range(E)]
for i in range(E):
print("""Edge """, i + 1)
_lowercase , _lowercase , _lowercase = (
int(x)
for x in input("""Enter source, destination, weight: """).strip().split(""" """)
)
_lowercase = {"""src""": src, """dst""": dest, """weight""": weight}
_lowercase = int(input("""\nEnter shortest path source:""").strip())
_lowercase = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 427 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class A ( unittest.TestCase ):
lowerCamelCase : Tuple = StableDiffusionLDMaDPipeline
lowerCamelCase : Optional[Any] = TEXT_TO_IMAGE_PARAMS
lowerCamelCase : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCamelCase : Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
def A__ ( self ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
lowercase__ = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=lowerCamelCase__ , set_alpha_to_one=lowerCamelCase__ , )
torch.manual_seed(0 )
lowercase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
lowercase__ = CLIPTextModel(lowerCamelCase__ )
lowercase__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowercase__ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def A__ ( self , lowerCamelCase__ , lowerCamelCase__=0 ) -> Optional[int]:
'''simple docstring'''
if str(lowerCamelCase__ ).startswith("""mps""" ):
lowercase__ = torch.manual_seed(lowerCamelCase__ )
else:
lowercase__ = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
lowercase__ = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def A__ ( self ) -> str:
'''simple docstring'''
lowercase__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = StableDiffusionLDMaDPipeline(**lowerCamelCase__ )
lowercase__ = ldmad_pipe.to(lowerCamelCase__ )
ldmad_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
lowercase__ = self.get_dummy_inputs(lowerCamelCase__ )
lowercase__ = ldmad_pipe(**lowerCamelCase__ )
lowercase__ , lowercase__ = output.rgb, output.depth
lowercase__ = rgb[0, -3:, -3:, -1]
lowercase__ = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
lowercase__ = np.array(
[0.37_33_81_76, 0.7_02_47, 0.74_20_31_93, 0.51_64_36_04, 0.58_25_67_93, 0.60_93_21_36, 0.4_18_10_95, 0.48_35_58_77, 0.46_53_52_62] )
lowercase__ = np.array([1_03.4_67_27, 85.81_20_04, 87.84_92_36] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1e-2
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase__ = self.get_dummy_components()
lowercase__ = StableDiffusionLDMaDPipeline(**lowerCamelCase__ )
lowercase__ = ldmad_pipe.to(lowerCamelCase__ )
ldmad_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
lowercase__ = self.get_dummy_inputs(lowerCamelCase__ )
lowercase__ = 3 * [inputs["""prompt"""]]
# forward
lowercase__ = ldmad_pipe(**lowerCamelCase__ )
lowercase__ , lowercase__ = output.rgb, output.depth
lowercase__ = rgb_slice_a[0, -3:, -3:, -1]
lowercase__ = depth_slice_a[0, -3:, -1]
lowercase__ = self.get_dummy_inputs(lowerCamelCase__ )
lowercase__ = 3 * [inputs.pop("""prompt""" )]
lowercase__ = ldmad_pipe.tokenizer(
lowerCamelCase__ , padding="""max_length""" , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=lowerCamelCase__ , return_tensors="""pt""" , )
lowercase__ = text_inputs["""input_ids"""].to(lowerCamelCase__ )
lowercase__ = ldmad_pipe.text_encoder(lowerCamelCase__ )[0]
lowercase__ = prompt_embeds
# forward
lowercase__ = ldmad_pipe(**lowerCamelCase__ )
lowercase__ , lowercase__ = output.rgb, output.depth
lowercase__ = rgb_slice_a[0, -3:, -3:, -1]
lowercase__ = depth_slice_a[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1e-4
assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1e-4
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = PNDMScheduler(skip_prk_steps=lowerCamelCase__ )
lowercase__ = StableDiffusionLDMaDPipeline(**lowerCamelCase__ )
lowercase__ = ldmad_pipe.to(lowerCamelCase__ )
ldmad_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
lowercase__ = self.get_dummy_inputs(lowerCamelCase__ )
lowercase__ = """french fries"""
lowercase__ = ldmad_pipe(**lowerCamelCase__ , negative_prompt=lowerCamelCase__ )
lowercase__ , lowercase__ = output.rgb, output.depth
lowercase__ = rgb[0, -3:, -3:, -1]
lowercase__ = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
lowercase__ = np.array(
[0.3_70_44, 0.71_81_15_03, 0.7_22_32_51, 0.48_60_36_75, 0.5_63_83_91, 0.6_36_49_48, 0.42_83_37_04, 0.4_90_13_15, 0.47_92_62_17] )
lowercase__ = np.array([1_07.8_47_38, 84.6_28_02, 89.96_21_35] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1e-2
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self , lowerCamelCase__ , lowerCamelCase__="cpu" , lowerCamelCase__=torch.floataa , lowerCamelCase__=0 ) -> List[str]:
'''simple docstring'''
lowercase__ = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
lowercase__ = np.random.RandomState(lowerCamelCase__ ).standard_normal((1, 4, 64, 64) )
lowercase__ = torch.from_numpy(lowerCamelCase__ ).to(device=lowerCamelCase__ , dtype=lowerCamelCase__ )
lowercase__ = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase__ = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d""" )
lowercase__ = ldmad_pipe.to(lowerCamelCase__ )
ldmad_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
lowercase__ = self.get_inputs(lowerCamelCase__ )
lowercase__ = ldmad_pipe(**lowerCamelCase__ )
lowercase__ , lowercase__ = output.rgb, output.depth
lowercase__ = rgb[0, -3:, -3:, -1].flatten()
        lowercase__ = depth[0, -3:, -1].flatten()
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512)
lowercase__ = np.array(
[0.53_80_54_65, 0.56_70_73_05, 0.5_48_65_15, 0.57_01_22_36, 0.5_81_45_11, 0.56_25_34_87, 0.54_84_30_14, 0.55_09_22_63, 0.6_45_97_06] )
lowercase__ = np.array(
[0.9_26_37_81, 0.6_67_86_72, 0.5_48_65_15, 0.92_20_21_45, 0.67_83_11_35, 0.56_25_34_87, 0.9_24_16_94, 0.7_55_14_78, 0.6_45_97_06] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3e-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3e-3
@nightly
@require_torch_gpu
class A ( unittest.TestCase ):
def A__ ( self ) -> Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self , lowerCamelCase__ , lowerCamelCase__="cpu" , lowerCamelCase__=torch.floataa , lowerCamelCase__=0 ) -> Union[str, Any]:
'''simple docstring'''
lowercase__ = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
lowercase__ = np.random.RandomState(lowerCamelCase__ ).standard_normal((1, 4, 64, 64) )
lowercase__ = torch.from_numpy(lowerCamelCase__ ).to(device=lowerCamelCase__ , dtype=lowerCamelCase__ )
lowercase__ = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 50,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def A__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase__ = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d""" ).to(lowerCamelCase__ )
ldmad_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
lowercase__ = self.get_inputs(lowerCamelCase__ )
lowercase__ = ldmad_pipe(**lowerCamelCase__ )
lowercase__ , lowercase__ = output.rgb, output.depth
lowercase__ = 0.49_55_86
lowercase__ = 0.33_79_55_15
lowercase__ = 1_12.4_85_18
lowercase__ = 98.48_97_46
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase__ = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d-4c""" ).to(lowerCamelCase__ )
ldmad_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
lowercase__ = self.get_inputs(lowerCamelCase__ )
lowercase__ = ldmad_pipe(**lowerCamelCase__ )
lowercase__ , lowercase__ = output.rgb, output.depth
lowercase__ = 0.4_19_41_27
lowercase__ = 0.35_37_55_86
lowercase__ = 0.5_63_85_02
lowercase__ = 0.34_68_61_03
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
| 325 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
__A = logging.get_logger(__name__)
__A = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all MVP models at https://huggingface.co/models?filter=mvp
__A = {
"vocab_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
},
"added_tokens.json": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
},
"merges_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
},
"tokenizer_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
},
}
__A = {
"RUCAIBox/mvp": 1_024,
}
class A ( __UpperCAmelCase ):
lowerCamelCase : List[str] = VOCAB_FILES_NAMES
lowerCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Tuple = ["""input_ids""", """attention_mask"""]
lowerCamelCase : Dict = MvpTokenizer
def __init__( self , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__="replace" , lowerCamelCase__="<s>" , lowerCamelCase__="</s>" , lowerCamelCase__="</s>" , lowerCamelCase__="<s>" , lowerCamelCase__="<unk>" , lowerCamelCase__="<pad>" , lowerCamelCase__="<mask>" , lowerCamelCase__=False , lowerCamelCase__=True , **lowerCamelCase__ , ) -> List[str]:
'''simple docstring'''
super().__init__(
lowerCamelCase__ , lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , errors=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ , **lowerCamelCase__ , )
lowercase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , lowerCamelCase__ ) != add_prefix_space:
lowercase__ = getattr(lowerCamelCase__ , pre_tok_state.pop("""type""" ) )
lowercase__ = add_prefix_space
lowercase__ = pre_tok_class(**lowerCamelCase__ )
lowercase__ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowercase__ = """post_processor"""
lowercase__ = getattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ )
if tokenizer_component_instance:
lowercase__ = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast as tuples for the object `post_processor_class`
if "sep" in state:
lowercase__ = tuple(state["""sep"""] )
if "cls" in state:
lowercase__ = tuple(state["""cls"""] )
lowercase__ = False
if state.get("""add_prefix_space""" , lowerCamelCase__ ) != add_prefix_space:
lowercase__ = add_prefix_space
lowercase__ = True
if state.get("""trim_offsets""" , lowerCamelCase__ ) != trim_offsets:
lowercase__ = trim_offsets
lowercase__ = True
if changes_to_apply:
lowercase__ = getattr(lowerCamelCase__ , state.pop("""type""" ) )
lowercase__ = component_class(**lowerCamelCase__ )
setattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ )
@property
def A__ ( self ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def A__ ( self , lowerCamelCase__ ) -> Any:
'''simple docstring'''
lowercase__ = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else value
lowercase__ = value
def A__ ( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> BatchEncoding:
'''simple docstring'''
lowercase__ = kwargs.get("""is_split_into_words""" , lowerCamelCase__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*lowerCamelCase__ , **lowerCamelCase__ )
def A__ ( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> BatchEncoding:
'''simple docstring'''
lowercase__ = kwargs.get("""is_split_into_words""" , lowerCamelCase__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*lowerCamelCase__ , **lowerCamelCase__ )
def A__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> Tuple[str]:
'''simple docstring'''
lowercase__ = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ )
return tuple(lowerCamelCase__ )
def A__ ( self , lowerCamelCase__ , lowerCamelCase__=None ) -> Optional[int]:
'''simple docstring'''
lowercase__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def A__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> List[int]:
'''simple docstring'''
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
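# --- Editor's note: a minimal illustration of the special-token layout the
# two methods above produce for single sequences and pairs (BART/MVP style);
# the token IDs below are hypothetical placeholders, not real vocab entries.
BOS, EOS = 0, 2

def build_inputs_sketch(ids_a, ids_b=None):
    output = [BOS] + ids_a + [EOS]
    if ids_b is None:
        return output
    return output + [EOS] + ids_b + [EOS]

# build_inputs_sketch([5, 6])       # -> [0, 5, 6, 2]           i.e. <s> A </s>
# build_inputs_sketch([5, 6], [7])  # -> [0, 5, 6, 2, 2, 7, 2]  i.e. <s> A </s></s> B </s>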
| 325 | 1 |
"""simple docstring"""
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
A__ = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
A__ = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ).convert('RGB' )
A__ = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3) , (0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1) ),
] )
A__ = transform(UpperCamelCase__ ).unsqueeze(0 ).to(UpperCamelCase__ )
return image
def UpperCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
if "visual_encoder" in key:
A__ = re.sub('visual_encoder*' , 'vision_model.encoder' , UpperCamelCase__ )
if "blocks" in key:
A__ = re.sub(r'blocks' , 'layers' , UpperCamelCase__ )
if "attn" in key:
A__ = re.sub(r'attn' , 'self_attn' , UpperCamelCase__ )
if "norm1" in key:
A__ = re.sub(r'norm1' , 'layer_norm1' , UpperCamelCase__ )
if "norm2" in key:
A__ = re.sub(r'norm2' , 'layer_norm2' , UpperCamelCase__ )
if "encoder.norm" in key:
A__ = re.sub(r'encoder.norm' , 'post_layernorm' , UpperCamelCase__ )
if "encoder.patch_embed.proj" in key:
A__ = re.sub(r'encoder.patch_embed.proj' , 'embeddings.patch_embedding' , UpperCamelCase__ )
if "encoder.pos_embed" in key:
A__ = re.sub(r'encoder.pos_embed' , 'embeddings.position_embedding' , UpperCamelCase__ )
if "encoder.cls_token" in key:
A__ = re.sub(r'encoder.cls_token' , 'embeddings.class_embedding' , UpperCamelCase__ )
if "self_attn" in key:
A__ = re.sub(r'self_attn.proj' , 'self_attn.projection' , UpperCamelCase__ )
return key
@torch.no_grad()
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__=None ):
"""simple docstring"""
if config_path is not None:
A__ = BlipConfig.from_pretrained(UpperCamelCase__ )
else:
A__ = BlipConfig(projection_dim=512 , text_config={} , vision_config={} )
A__ = BlipForConditionalGeneration(UpperCamelCase__ ).eval()
A__ = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'
A__ = blip_decoder(pretrained=UpperCamelCase__ , image_size=384 , vit='base' )
A__ = pt_model.eval()
A__ = pt_model.state_dict()
for key in modified_state_dict.copy():
A__ = modified_state_dict.pop(UpperCamelCase__ )
A__ = rename_key(UpperCamelCase__ )
A__ = value
hf_model.load_state_dict(UpperCamelCase__ )
A__ = 384
A__ = load_demo_image(image_size=UpperCamelCase__ , device='cpu' )
A__ = BertTokenizer.from_pretrained('bert-base-uncased' )
A__ = tokenizer(['a picture of'] ).input_ids
A__ = hf_model.generate(UpperCamelCase__ , UpperCamelCase__ )
assert out[0].tolist() == [30_522, 1_037, 3_861, 1_997, 1_037, 2_450, 3_564, 2_006, 1_996, 3_509, 2_007, 2_014, 3_899, 102]
A__ = hf_model.generate(UpperCamelCase__ )
assert out[0].tolist() == [30_522, 1_037, 2_450, 3_564, 2_006, 1_996, 3_509, 2_007, 2_014, 3_899, 102]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(UpperCamelCase__ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
A__ = (
'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
)
A__ = blip_vqa(pretrained=UpperCamelCase__ , image_size=UpperCamelCase__ , vit='base' )
vqa_model.eval()
A__ = vqa_model.state_dict()
for key in modified_state_dict.copy():
A__ = modified_state_dict.pop(UpperCamelCase__ )
A__ = rename_key(UpperCamelCase__ )
A__ = value
A__ = BlipForQuestionAnswering(UpperCamelCase__ )
hf_vqa_model.load_state_dict(UpperCamelCase__ )
A__ = ['How many dogs are in this image?']
A__ = tokenizer(UpperCamelCase__ , return_tensors='pt' ).input_ids
A__ = hf_vqa_model.generate(UpperCamelCase__ , UpperCamelCase__ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa' )
A__ = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'
A__ = blip_itm(pretrained=UpperCamelCase__ , image_size=UpperCamelCase__ , vit='base' )
itm_model.eval()
A__ = itm_model.state_dict()
for key in modified_state_dict.copy():
A__ = modified_state_dict.pop(UpperCamelCase__ )
A__ = rename_key(UpperCamelCase__ )
A__ = value
A__ = BlipForImageTextRetrieval(UpperCamelCase__ )
A__ = ['A picture of a woman with a dog sitting in a beach']
A__ = tokenizer(
UpperCamelCase__ , return_tensors='pt' , padding='max_length' , truncation=UpperCamelCase__ , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(UpperCamelCase__ )
hf_itm_model.eval()
A__ = hf_itm_model(UpperCamelCase__ , UpperCamelCase__ , use_itm_head=UpperCamelCase__ )
A__ = hf_itm_model(UpperCamelCase__ , UpperCamelCase__ , use_itm_head=UpperCamelCase__ )
assert out[0].item() == 0.2_1_1_0_6_8_7_4_9_4_2_7_7_9_5_4
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5_6_9_8_8_4_5_3_8_6_5_0_5_1_2_7
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm' )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
__lowerCamelCase = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
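# --- Editor's note: a hand-traced example of the `rename_key` regex chain
# above on one vision-encoder key (the sample key is hypothetical but follows
# the original BLIP checkpoint layout):
# sample = "visual_encoder.blocks.0.attn.proj.weight"
#   "visual_encoder" -> "vision_model.encoder"
#   "blocks"         -> "layers"
#   "attn"           -> "self_attn", then "self_attn.proj" -> "self_attn.projection"
# rename_key(sample) == "vision_model.encoder.layers.0.self_attn.projection.weight"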
| 536 | """simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 1 / sqrt(2 ) ):
"""simple docstring"""
A__ = tau * frequency / samplerate
A__ = sin(UpperCamelCase__ )
A__ = cos(UpperCamelCase__ )
A__ = _sin / (2 * q_factor)
A__ = (1 - _cos) / 2
A__ = 1 - _cos
A__ = 1 + alpha
A__ = -2 * _cos
A__ = 1 - alpha
A__ = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 1 / sqrt(2 ) ):
"""simple docstring"""
A__ = tau * frequency / samplerate
A__ = sin(UpperCamelCase__ )
A__ = cos(UpperCamelCase__ )
A__ = _sin / (2 * q_factor)
A__ = (1 + _cos) / 2
A__ = -1 - _cos
A__ = 1 + alpha
A__ = -2 * _cos
A__ = 1 - alpha
A__ = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 1 / sqrt(2 ) ):
"""simple docstring"""
A__ = tau * frequency / samplerate
A__ = sin(UpperCamelCase__ )
A__ = cos(UpperCamelCase__ )
A__ = _sin / (2 * q_factor)
A__ = _sin / 2
A__ = 0
A__ = -ba
A__ = 1 + alpha
A__ = -2 * _cos
A__ = 1 - alpha
A__ = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 1 / sqrt(2 ) ):
"""simple docstring"""
A__ = tau * frequency / samplerate
A__ = sin(UpperCamelCase__ )
A__ = cos(UpperCamelCase__ )
A__ = _sin / (2 * q_factor)
A__ = 1 - alpha
A__ = -2 * _cos
A__ = 1 + alpha
A__ = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 1 / sqrt(2 ) , ):
"""simple docstring"""
A__ = tau * frequency / samplerate
A__ = sin(UpperCamelCase__ )
A__ = cos(UpperCamelCase__ )
A__ = _sin / (2 * q_factor)
A__ = 10 ** (gain_db / 40)
A__ = 1 + alpha * big_a
A__ = -2 * _cos
A__ = 1 - alpha * big_a
A__ = 1 + alpha / big_a
A__ = -2 * _cos
A__ = 1 - alpha / big_a
A__ = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 1 / sqrt(2 ) , ):
"""simple docstring"""
A__ = tau * frequency / samplerate
A__ = sin(UpperCamelCase__ )
A__ = cos(UpperCamelCase__ )
A__ = _sin / (2 * q_factor)
A__ = 10 ** (gain_db / 40)
A__ = (big_a + 1) - (big_a - 1) * _cos
A__ = (big_a + 1) + (big_a - 1) * _cos
A__ = (big_a - 1) - (big_a + 1) * _cos
A__ = (big_a - 1) + (big_a + 1) * _cos
A__ = 2 * sqrt(UpperCamelCase__ ) * alpha
A__ = big_a * (pmc + aaa)
A__ = 2 * big_a * mpc
A__ = big_a * (pmc - aaa)
A__ = ppmc + aaa
A__ = -2 * pmpc
A__ = ppmc - aaa
A__ = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 1 / sqrt(2 ) , ):
"""simple docstring"""
A__ = tau * frequency / samplerate
A__ = sin(UpperCamelCase__ )
A__ = cos(UpperCamelCase__ )
A__ = _sin / (2 * q_factor)
A__ = 10 ** (gain_db / 40)
A__ = (big_a + 1) - (big_a - 1) * _cos
A__ = (big_a + 1) + (big_a - 1) * _cos
A__ = (big_a - 1) - (big_a + 1) * _cos
A__ = (big_a - 1) + (big_a + 1) * _cos
A__ = 2 * sqrt(UpperCamelCase__ ) * alpha
A__ = big_a * (ppmc + aaa)
A__ = -2 * big_a * pmpc
A__ = big_a * (ppmc - aaa)
A__ = pmc + aaa
A__ = 2 * mpc
A__ = pmc - aaa
A__ = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
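# --- Editor's note: a small numeric sanity check for the low-pass design at
# the top of this file (an RBJ Audio-EQ-Cookbook biquad). The helper below is
# an illustrative reconstruction, not part of the original module.
from math import cos, sin, sqrt, tau

def lowpass_coeffs_sketch(frequency, samplerate, q_factor=1 / sqrt(2)):
    w0 = tau * frequency / samplerate
    alpha = sin(w0) / (2 * q_factor)
    b1 = 1 - cos(w0)
    b0 = b2 = b1 / 2
    a0, a1, a2 = 1 + alpha, -2 * cos(w0), 1 - alpha
    return (b0, b1, b2), (a0, a1, a2)

# A low-pass biquad must have unity gain at DC: H(z=1) = sum(b) / sum(a) == 1,
# which holds here because sum(b) = 2 * (1 - cos(w0)) = sum(a).
# b, a = lowpass_coeffs_sketch(1_000, 48_000)
# assert abs(sum(b) / sum(a) - 1.0) < 1e-12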
| 536 | 1 |
lowerCamelCase__ : dict[tuple[int, int, int], int] = {}
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> int:
'''simple docstring'''
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
lowercase__ : Tuple = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
lowercase__ : Union[str, Any] = _calculate(days - 1 , lowercase_ , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
lowercase__ : List[str] = _calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
lowercase__ : Dict = _calculate(days - 1 , lowercase_ , 0 )
lowercase__ : List[str] = state_late + state_absent + state_ontime
lowercase__ : List[Any] = prizestrings
return prizestrings
def UpperCamelCase ( lowercase_ = 30 ) -> int:
'''simple docstring'''
return _calculate(lowercase_ , absent=0 , late=0 )
if __name__ == "__main__":
print(solution())
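# --- Editor's note: a brute-force cross-check of the memoised recursion above
# (O = on time, L = late, A = absent; a prize string has fewer than two A's
# and no run of three consecutive L's). The helper name is illustrative.
from itertools import product

def brute_force_prize_strings(days):
    count = 0
    for chars in product("OLA", repeat=days):
        s = "".join(chars)
        if s.count("A") < 2 and "LLL" not in s:
            count += 1
    return count

# brute_force_prize_strings(4) == 43 == _calculate(4, absent=0, late=0)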
| 12 |
lowerCamelCase__ : dict[tuple[int, int, int], int] = {}
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> int:
'''simple docstring'''
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
lowercase__ : Tuple = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
lowercase__ : Union[str, Any] = _calculate(days - 1 , lowercase_ , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
lowercase__ : List[str] = _calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
lowercase__ : Dict = _calculate(days - 1 , lowercase_ , 0 )
lowercase__ : List[str] = state_late + state_absent + state_ontime
lowercase__ : List[Any] = prizestrings
return prizestrings
def UpperCamelCase ( lowercase_ = 30 ) -> int:
'''simple docstring'''
return _calculate(lowercase_ , absent=0 , late=0 )
if __name__ == "__main__":
print(solution())
| 12 | 1 |
from math import pi, sqrt, tan
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : float ) -> float:
if side_length < 0:
raise ValueError('surface_area_cube() only accepts non-negative values' )
return 6 * side_length**2
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float ) -> float:
if length < 0 or breadth < 0 or height < 0:
raise ValueError('surface_area_cuboid() only accepts non-negative values' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : float ) -> float:
if radius < 0:
raise ValueError('surface_area_sphere() only accepts non-negative values' )
return 4 * pi * radius**2
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : float ) -> float:
if radius < 0:
raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
return 3 * pi * radius**2
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float ) -> float:
if radius < 0 or height < 0:
raise ValueError('surface_area_cone() only accepts non-negative values' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float ) -> float:
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'surface_area_conical_frustum() only accepts non-negative values' )
__lowercase = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float ) -> float:
if radius < 0 or height < 0:
raise ValueError('surface_area_cylinder() only accepts non-negative values' )
return 2 * pi * radius * (height + radius)
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float ) -> float:
if torus_radius < 0 or tube_radius < 0:
raise ValueError('surface_area_torus() only accepts non-negative values' )
if torus_radius < tube_radius:
raise ValueError(
'surface_area_torus() does not support spindle or self intersecting tori' )
    return 4 * pow(pi , 2 ) * torus_radius * tube_radius
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float ) -> float:
if length < 0 or width < 0:
raise ValueError('area_rectangle() only accepts non-negative values' )
return length * width
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : float ) -> float:
if side_length < 0:
raise ValueError('area_square() only accepts non-negative values' )
return side_length**2
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float ) -> float:
if base < 0 or height < 0:
raise ValueError('area_triangle() only accepts non-negative values' )
return (base * height) / 2
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float ) -> float:
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('Given three sides do not form a triangle' )
__lowercase = (sidea + sidea + sidea) / 2
__lowercase = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float ) -> float:
if base < 0 or height < 0:
raise ValueError('area_parallelogram() only accepts non-negative values' )
return base * height
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float ) -> float:
if basea < 0 or basea < 0 or height < 0:
raise ValueError('area_trapezium() only accepts non-negative values' )
return 1 / 2 * (basea + basea) * height
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : float ) -> float:
if radius < 0:
raise ValueError('area_circle() only accepts non-negative values' )
return pi * radius**2
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float ) -> float:
if radius_x < 0 or radius_y < 0:
raise ValueError('area_ellipse() only accepts non-negative values' )
return pi * radius_x * radius_y
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float ) -> float:
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('area_rhombus() only accepts non-negative values' )
return 1 / 2 * diagonal_a * diagonal_a
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : float ) -> float:
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or sides < 3:
raise ValueError(
'area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides' )
elif length < 0:
raise ValueError(
'area_reg_polygon() only accepts non-negative values as \
length of a side' )
    return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("""[DEMO] Areas of various geometric shapes: \n""")
print(F'''Rectangle: {area_rectangle(10, 20) = }''')
print(F'''Square: {area_square(10) = }''')
print(F'''Triangle: {area_triangle(10, 10) = }''')
print(F'''Triangle: {area_triangle_three_sides(5, 12, 13) = }''')
print(F'''Parallelogram: {area_parallelogram(10, 20) = }''')
print(F'''Rhombus: {area_rhombus(10, 20) = }''')
print(F'''Trapezium: {area_trapezium(10, 20, 30) = }''')
print(F'''Circle: {area_circle(20) = }''')
print(F'''Ellipse: {area_ellipse(10, 20) = }''')
print("""\nSurface Areas of various geometric shapes: \n""")
print(F'''Cube: {surface_area_cube(20) = }''')
print(F'''Cuboid: {surface_area_cuboid(10, 20, 30) = }''')
print(F'''Sphere: {surface_area_sphere(20) = }''')
print(F'''Hemisphere: {surface_area_hemisphere(20) = }''')
print(F'''Cone: {surface_area_cone(10, 20) = }''')
print(F'''Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }''')
print(F'''Cylinder: {surface_area_cylinder(10, 20) = }''')
print(F'''Torus: {surface_area_torus(20, 10) = }''')
print(F'''Equilateral Triangle: {area_reg_polygon(3, 10) = }''')
print(F'''Square: {area_reg_polygon(4, 10) = }''')
    print(F'''Regular Pentagon: {area_reg_polygon(5, 10) = }''')
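    # Editor's addition: two hand-checkable assertions (assuming the functions
    # above are bound under the descriptive names this demo block already uses):
    # a 3-4-5 right triangle has area 6; a unit sphere has surface area 4*pi.
    assert abs(area_triangle_three_sides(3, 4, 5) - 6.0) < 1e-9
    assert abs(surface_area_sphere(1) - 4 * pi) < 1e-9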
| 688 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class A__ :
def __init__( self : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple=13 , _UpperCAmelCase : Any=7 , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : str=True , _UpperCAmelCase : Dict=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[Any]=99 , _UpperCAmelCase : List[Any]=16 , _UpperCAmelCase : List[Any]=36 , _UpperCAmelCase : Optional[Any]=6 , _UpperCAmelCase : List[str]=6 , _UpperCAmelCase : Any=6 , _UpperCAmelCase : Any=37 , _UpperCAmelCase : int="gelu" , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : Dict=5_12 , _UpperCAmelCase : Optional[Any]=16 , _UpperCAmelCase : List[str]=2 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : Any=3 , _UpperCAmelCase : List[Any]=4 , _UpperCAmelCase : Any=None , ) -> Optional[Any]:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = embedding_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_hidden_groups
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
def a__ ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase = ids_tensor([self.batch_size] , self.num_choices )
__lowercase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def a__ ( self : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : str ) -> Optional[int]:
"""simple docstring"""
__lowercase = AlbertModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
__lowercase = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a__ ( self : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int ) -> Tuple:
"""simple docstring"""
__lowercase = AlbertForPreTraining(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , sentence_order_label=_UpperCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def a__ ( self : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = AlbertForMaskedLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : Dict ) -> int:
"""simple docstring"""
__lowercase = AlbertForQuestionAnswering(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a__ ( self : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = AlbertForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> List[Any]:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = AlbertForTokenClassification(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self : Dict , _UpperCAmelCase : Tuple , _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> int:
"""simple docstring"""
__lowercase = self.num_choices
__lowercase = AlbertForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
        (__lowercase, __lowercase, __lowercase, __lowercase, __lowercase, __lowercase, __lowercase) = config_and_inputs
__lowercase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A__ ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : int = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ : Dict = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ : Optional[Any] = True
def a__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : int=False ) -> Tuple:
"""simple docstring"""
__lowercase = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if return_labels:
if model_class in get_values(_UpperCAmelCase ):
__lowercase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_UpperCAmelCase )
__lowercase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
return inputs_dict
def a__ ( self : str ) -> str:
"""simple docstring"""
__lowercase = AlbertModelTester(self )
__lowercase = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_UpperCAmelCase )
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )
def a__ ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase )
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )
def a__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )
def a__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowercase = type
self.model_tester.create_and_check_model(*_UpperCAmelCase )
@slow
def a__ ( self : int ) -> Any:
"""simple docstring"""
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = AlbertModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@require_torch
class A__ ( unittest.TestCase ):
@slow
def a__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
__lowercase = AlbertModel.from_pretrained('albert-base-v2' )
__lowercase = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
__lowercase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0]
__lowercase = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , _UpperCAmelCase )
__lowercase = torch.tensor(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _UpperCAmelCase , atol=1e-4 ) )
| 688 | 1 |
"""simple docstring"""
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCAmelCase = {
'''vocab_file''': {
'''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'''
},
'''merges_file''': {
'''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'''
},
}
UpperCAmelCase = {'''allegro/herbert-base-cased''': 514}
UpperCAmelCase = {}
class __magic_name__ ( __UpperCAmelCase ):
__A : str = VOCAB_FILES_NAMES
__A : Tuple = PRETRAINED_VOCAB_FILES_MAP
__A : Dict = PRETRAINED_INIT_CONFIGURATION
__A : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A : List[str] = HerbertTokenizer
def __init__( self : Any , snake_case__ : Tuple=None , snake_case__ : List[str]=None , snake_case__ : List[Any]=None , snake_case__ : Any="<s>" , snake_case__ : Union[str, Any]="<unk>" , snake_case__ : Dict="<pad>" , snake_case__ : Union[str, Any]="<mask>" , snake_case__ : int="</s>" , **snake_case__ : Any , ):
'''simple docstring'''
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , sep_token=snake_case__ , **snake_case__ , )
def __snake_case ( self : Union[str, Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
'''simple docstring'''
lowercase :Dict = [self.cls_token_id]
lowercase :Union[str, Any] = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __snake_case ( self : Dict , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case__ )) + [1]
return [1] + ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) + [1]
def __snake_case ( self : List[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
'''simple docstring'''
lowercase :List[str] = [self.sep_token_id]
lowercase :List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __snake_case ( self : Dict , snake_case__ : str , snake_case__ : Optional[str] = None ):
'''simple docstring'''
lowercase :List[str] = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
| 677 |
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __magic_name__ ( unittest.TestCase ):
def __init__( self : List[Any] , snake_case__ : Optional[int] , snake_case__ : List[str]=3 , snake_case__ : int=3_2 , snake_case__ : int=3 , snake_case__ : str=1_0 , snake_case__ : str=[1_0, 2_0, 3_0, 4_0] , snake_case__ : int=[1, 1, 2, 1] , snake_case__ : List[Any]=True , snake_case__ : Tuple=True , snake_case__ : Optional[Any]="relu" , snake_case__ : Optional[int]=3 , snake_case__ : Optional[Any]=None , ):
'''simple docstring'''
lowercase :Union[str, Any] = parent
lowercase :Optional[Any] = batch_size
lowercase :Dict = image_size
lowercase :Any = num_channels
lowercase :List[str] = embeddings_size
lowercase :Union[str, Any] = hidden_sizes
lowercase :Any = depths
lowercase :Dict = is_training
lowercase :Any = use_labels
lowercase :Any = hidden_act
lowercase :List[str] = num_labels
lowercase :List[Any] = scope
lowercase :int = len(snake_case__ )
def __snake_case ( self : Any ):
'''simple docstring'''
lowercase :Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase :Union[str, Any] = self.get_config()
return config, pixel_values
def __snake_case ( self : Dict ):
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def __snake_case ( self : str , snake_case__ : Tuple , snake_case__ : List[Any] ):
'''simple docstring'''
lowercase :Any = FlaxRegNetModel(config=snake_case__ )
lowercase :str = model(snake_case__ )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def __snake_case ( self : List[str] , snake_case__ : Optional[int] , snake_case__ : str ):
'''simple docstring'''
lowercase :Tuple = self.num_labels
lowercase :str = FlaxRegNetForImageClassification(config=snake_case__ )
lowercase :Union[str, Any] = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __snake_case ( self : str ):
'''simple docstring'''
lowercase :int = self.prepare_config_and_inputs()
lowercase , lowercase :Tuple = config_and_inputs
lowercase :Union[str, Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class __magic_name__ ( __UpperCAmelCase , unittest.TestCase ):
__A : List[Any] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
__A : str = False
__A : Tuple = False
__A : Dict = False
    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)

            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
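

# Illustrative sketch (assumption: only that jax is installed): the jit-vs-eager
# comparison performed by test_jit_compilation above, reduced to a standalone
# function. `toy_model` is a made-up stand-in for a Flax module, not a real API.
def _demo_jit_consistency():
    import jax
    import jax.numpy as jnp

    def toy_model(pixel_values):
        pooled = pixel_values.mean(axis=(1, 2))  # global average pool over H and W
        return pooled @ jnp.ones((pooled.shape[-1], 10))  # fixed projection "head"

    jitted = jax.jit(toy_model)
    x = jnp.ones((1, 32, 32, 3))
    with jax.disable_jit():
        eager_out = toy_model(x)
    # Same check as the test: compiled and eager execution agree on output shapes.
    assert jitted(x).shape == eager_out.shape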
| 677 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 692 |
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)


class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)

        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: Optional[str] = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
| 692 | 1 |
"""simple docstring"""
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
# Return True if there is node that has not iterated.
UpperCamelCase : List[str] = [False] * len(SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = []
queue.append(SCREAMING_SNAKE_CASE )
UpperCamelCase : List[Any] = True
while queue:
UpperCamelCase : Tuple = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(SCREAMING_SNAKE_CASE )
UpperCamelCase : List[Any] = True
UpperCamelCase : Tuple = u
return visited[t]
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
# This array is filled by BFS and to store path
UpperCamelCase : int = [-1] * (len(SCREAMING_SNAKE_CASE ))
UpperCamelCase : Any = 0
while bfs(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase : str = float("""Inf""" )
UpperCamelCase : str = sink
while s != source:
# Find the minimum value in select path
UpperCamelCase : Union[str, Any] = min(SCREAMING_SNAKE_CASE , graph[parent[s]][s] )
UpperCamelCase : List[Any] = parent[s]
max_flow += path_flow
UpperCamelCase : List[str] = sink
while v != source:
UpperCamelCase : Tuple = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
UpperCamelCase : Tuple = parent[v]
return max_flow
__magic_name__ : Dict = [
[0, 1_6, 1_3, 0, 0, 0],
[0, 0, 1_0, 1_2, 0, 0],
[0, 4, 0, 0, 1_4, 0],
[0, 0, 9, 0, 0, 2_0],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
__magic_name__ , __magic_name__ : Union[str, Any] = 0, 5
print(ford_fulkerson(graph, source, sink))
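

# Design note: `queue.pop(0)` in bfs() above is O(n) per pop. For larger graphs the
# standard choice is collections.deque, which pops from the left in O(1). A drop-in
# sketch with the same contract as bfs():
from collections import deque


def bfs_deque(graph, source, sink, parent):
    visited = [False] * len(graph)
    queue = deque([source])
    visited[source] = True
    while queue:
        u = queue.popleft()
        for ind, capacity in enumerate(graph[u]):
            if not visited[ind] and capacity > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[sink]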
| 102 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
__magic_name__ : Dict = logging.getLogger(__name__)
@dataclass
class lowercase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase : Optional[float] = field(
default=0.0 , metadata={"""help""": """The label smoothing epsilon to apply (if not zero)."""} )
__lowerCAmelCase : bool = field(default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Whether to SortishSamler or not."""} )
__lowerCAmelCase : bool = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} )
__lowerCAmelCase : bool = field(default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """whether to use adafactor"""} )
__lowerCAmelCase : Optional[float] = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Encoder layer dropout probability. Goes into model.config."""} )
__lowerCAmelCase : Optional[float] = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Decoder layer dropout probability. Goes into model.config."""} )
__lowerCAmelCase : Optional[float] = field(default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Dropout probability. Goes into model.config."""} )
__lowerCAmelCase : Optional[float] = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Attention dropout probability. Goes into model.config."""} )
__lowerCAmelCase : Optional[str] = field(
default="""linear""" , metadata={"""help""": f'''Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'''} , )
| 102 | 1 |
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the citation count text for the first Google Scholar result."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 522 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
    "tokenization_tapas": ["TapasTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tapas"] = [
        "TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TapasForMaskedLM",
        "TapasForQuestionAnswering",
        "TapasForSequenceClassification",
        "TapasModel",
        "TapasPreTrainedModel",
        "load_tf_weights_in_tapas",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_tapas"] = [
        "TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFTapasForMaskedLM",
        "TFTapasForQuestionAnswering",
        "TFTapasForSequenceClassification",
        "TFTapasModel",
        "TFTapasPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
    from .tokenization_tapas import TapasTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tapas import (
            TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TapasForMaskedLM,
            TapasForQuestionAnswering,
            TapasForSequenceClassification,
            TapasModel,
            TapasPreTrainedModel,
            load_tf_weights_in_tapas,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_tapas import (
            TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFTapasForMaskedLM,
            TFTapasForQuestionAnswering,
            TFTapasForSequenceClassification,
            TFTapasModel,
            TFTapasPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 522 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
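

# Illustrative sketch of what the _LazyModule pattern above buys: attributes resolve
# on first access instead of at import time. This is a simplified stand-in, not
# transformers' real implementation.
def _demo_lazy_module_class():
    import importlib
    import types

    class DemoLazyModule(types.ModuleType):
        def __init__(self, name, import_structure):
            super().__init__(name)
            self._import_structure = import_structure

        def __getattr__(self, attr):
            # Import the owning submodule only when `attr` is first requested.
            for submodule, names in self._import_structure.items():
                if attr in names:
                    module = importlib.import_module(f"{self.__name__}.{submodule}")
                    return getattr(module, attr)
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")

    return DemoLazyModule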
| 64 |
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
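

# Usage sketch outside the test harness, kept commented out because it downloads a
# checkpoint; the model id is an assumption, and any diffusers pipeline with a
# swappable scheduler works the same way:
# from diffusers import DiffusionPipeline
# pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
# pipe.scheduler = DPMSolverSDEScheduler.from_config(pipe.scheduler.config)
# image = pipe("an astronaut riding a horse").images[0]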
| 257 | 0 |
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]

# used alphabet --------------------------
# from string.ascii_uppercase
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
rotor2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
rotor3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
reflector = {
    "A": "N",
    "N": "A",
    "B": "O",
    "O": "B",
    "C": "P",
    "P": "C",
    "D": "Q",
    "Q": "D",
    "E": "R",
    "R": "E",
    "F": "S",
    "S": "F",
    "G": "T",
    "T": "G",
    "H": "U",
    "U": "H",
    "I": "V",
    "V": "I",
    "J": "W",
    "W": "J",
    "K": "X",
    "X": "K",
    "L": "Y",
    "Y": "L",
    "M": "Z",
    "Z": "M",
}

# -------------------------- extra rotors --------------------------
rotor4 = "RMDJXFUWGISLHVTCQNKYPBEZOA"
rotor5 = "SGLCPQWZHKXAREONTFBVIYJUDM"
rotor6 = "HVSICLTYKQUBXDWAJZOMFGPREN"
rotor7 = "RZWQHFMVDBKICJLNTUXAGYPSOE"
rotor8 = "LFKIJODBEGAMQPXVUHYSTCZRWN"
rotor9 = "KOAEGVDHXPQZMLFTYWJNBRCIUS"


def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f"Please use 3 unique rotors (not {unique_rotsel})"
        raise Exception(msg)

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        msg = f"First rotor position is not within range of 1..26 ({rotorpos1})"
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f"Second rotor position is not within range of 1..26 ({rotorpos2})"
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f"Third rotor position is not within range of 1..26 ({rotorpos3})"
        raise ValueError(msg)

    # Validates string and returns dict
    pbdict = _plugboard(pb)

    return rotpos, rotsel, pbdict


def _plugboard(pbstring: str) -> dict[str, str]:
    if not isinstance(pbstring, str):
        msg = f"Plugboard setting isn't type string ({type(pbstring)})"
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring)})"
        raise Exception(msg)
    elif pbstring == "":
        return {}

    pbstring = pbstring.replace(" ", "")

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg)
        elif i in tmppbl:
            msg = f"Duplicate symbol ({i})"
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl

    # Created the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb


def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(rotor_position, rotor_selection, plugb.upper())

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]

            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]

            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

        # else:
        #     pass
        #     Error could be also raised:
        #     raise ValueError('Invalid symbol(' + repr(symbol) + ')')

        result.append(symbol)

    return "".join(result)


if __name__ == "__main__":
    message = "This is my Python script that emulates the Enigma machine from WWII."
    rotor_pos = (1, 1, 1)
    pb = "pictures"
    rotor_sel = (rotor2, rotor4, rotor8)
    en = enigma(message, rotor_pos, rotor_sel, pb)

    print("Encrypted message:", en)
    print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
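
    # Property check: with identical settings the machine is its own inverse
    # (the output is upper-case because enigma() upper-cases its input).
    assert enigma(en, rotor_pos, rotor_sel, pb) == message.upper()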
| 314 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
    "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_text_dual_encoder"] = ["VisionTextDualEncoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_text_dual_encoder"] = ["FlaxVisionTextDualEncoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_text_dual_encoder"] = ["TFVisionTextDualEncoderModel"]


if TYPE_CHECKING:
    from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
    from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 314 | 1 |
'''simple docstring'''
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Return the first non-negative int found in `env_keys`, else `default`."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
| 69 |
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
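
# Illustrative check of the kwargs-override behaviour used above, kept commented out
# because it requires network access ("gpt2" is a small, real checkpoint id):
# demo_config = AutoConfig.from_pretrained("gpt2", scale_attn_by_inverse_layer_idx=True)
# assert demo_config.scale_attn_by_inverse_layer_idx is True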
| 428 | 0 |
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
UpperCAmelCase_ = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
UpperCAmelCase_ = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
UpperCAmelCase_ = spec.loader.load_module()
UpperCAmelCase_ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
UpperCAmelCase_ = re.compile("\[(.+?)\]\((https://huggingface\.co/.+?)\)")
UpperCAmelCase_ = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def SCREAMING_SNAKE_CASE ( ):
__a = []
for config_class in list(CONFIG_MAPPING.values() ):
__a = False
# source code of `config_class`
__a = inspect.getsource(_lowerCAmelCase )
__a = _re_checkpoint.findall(_lowerCAmelCase )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
__a , __a = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
__a = f"https://huggingface.co/{ckpt_name}"
if ckpt_link == ckpt_link_from_name:
__a = True
break
__a = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(_lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
__a = '\n'.join(sorted(_lowerCAmelCase ) )
raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
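

# Quick self-check of the checkpoint regex above (runs in isolation):
def _demo_checkpoint_regex():
    demo = "[bert-base-uncased](https://huggingface.co/bert-base-uncased)"
    assert _re_checkpoint.findall(demo) == [
        ("bert-base-uncased", "https://huggingface.co/bert-base-uncased")
    ]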
| 701 |
'''simple docstring'''
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir: Path, src_lang: str, tgt_lang: str, model_name: str):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"

    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 490 | 0 |
def add(first: int, second: int) -> int:
    """
    Add two non-negative integers using only bitwise operators.

    >>> add(3, 5)
    8
    >>> add(13, 5)
    18
    """
    while second != 0:
        # `c` holds the carry bits; XOR adds without carry, the shift re-injects the carry.
        c = first & second
        first ^= second
        second = c << 1
    return first
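

# Worked trace of the carry loop for add(5, 3):
#   first=0b101, second=0b011 -> c=0b001, first=0b110, second=0b010
#   first=0b110, second=0b010 -> c=0b010, first=0b100, second=0b100
#   first=0b100, second=0b100 -> c=0b100, first=0b000, second=0b1000
#   first=0b000, second=0b1000 -> c=0b000, first=0b1000, second=0b000 -> returns 8
assert add(5, 3) == 8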
if __name__ == "__main__":
import doctest
doctest.testmod()
    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
print(F"""{add(first, second) = }""")
| 61 |
"""Search Google from the command line and open the first result pages in a browser."""
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"User-Agent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10_000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
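
    # Hardening sketch (same requests API, kept commented out): in practice you would
    # add a timeout and surface HTTP errors instead of silently writing the page:
    # res = requests.get(url, headers={"User-Agent": UserAgent().random}, timeout=10)
    # res.raise_for_status()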
| 692 | 0 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self) -> pa.StructType:
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the Translation feature into a dictionary."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self) -> pa.StructType:
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).'
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the TranslationVariableLanguages feature into a dictionary."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
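

# Illustrative usage sketch, kept commented out because this module uses relative
# imports; the Dataset/Features calls below are standard `datasets` APIs:
# from datasets import Dataset, Features, Value
# feats = Features({"id": Value("string"), "translation": Translation(languages=["de", "en"])})
# ds = Dataset.from_dict(
#     {"id": ["0"], "translation": [{"en": "the cat", "de": "die Katze"}]}, features=feats
# )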
| 707 |
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)

absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)

logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--onnx_model_path''',
default=None,
type=str,
required=True,
help='''Path to ONNX model: ''',
)
parser.add_argument(
'''--output_dir''',
default=None,
type=str,
required=True,
help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
'''--tokenizer_name''',
default='''''',
type=str,
required=True,
help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
'''--version_2_with_negative''',
action='''store_true''',
help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
'''--null_score_diff_threshold''',
type=float,
default=0.0,
help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
'''--max_seq_length''',
default=384,
type=int,
help=(
'''The maximum total input sequence length after WordPiece tokenization. Sequences '''
'''longer than this will be truncated, and sequences shorter than this will be padded.'''
),
)
parser.add_argument(
'''--doc_stride''',
default=128,
type=int,
help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
'''--n_best_size''',
default=20,
type=int,
help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
'''--max_answer_length''',
default=30,
type=int,
help=(
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
),
)
parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''')
parser.add_argument(
'''--dataset_name''',
type=str,
default=None,
required=True,
help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--dataset_config_name''',
type=str,
default=None,
help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
    '''--preprocessing_num_workers''', type=int, default=4, help='''The number of processes to use for the preprocessing.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--fp16''',
action='''store_true''',
help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
'''--int8''',
action='''store_true''',
help='''Whether to use INT8''',
)
args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.'''
)
logger.info('''Training/evaluation parameters %s''', args)
eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (eval_batch_size, args.max_seq_length)
# TRT Engine properties
STRICT_TYPES = True

engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"
# import ONNX file
if not os.path.exists('''temp_engine'''):
os.makedirs('''temp_engine''')
EXPLICIT_BATCH = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, '''rb''') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, '''wb''') as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)

    # start time
    start_time = time.time()

    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
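

# Note on the pattern above: the host buffers passed in are allocated with
# cuda.pagelocked_empty (pinned memory), which is what lets memcpy_htod_async /
# memcpy_dtoh_async overlap with kernel execution on the stream; the call to
# stream.synchronize() is what makes the measured infer_time valid.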
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets["validation"].column_names

question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some questions have lots of whitespace on the left, which would waste context budget,
    # so strip it before tokenizing.
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples
eval_examples = raw_datasets["validation"]
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc="Running tokenizer on validation dataset",
)

data_collator = default_data_collator

eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage='''eval''' ):
    """Match the start/end logits to answer spans in the original contexts."""
    predictions = postprocess_qa_predictions(
        examples=examples, features=features, predictions=predictions, version_2_with_negative=args.version_2_with_negative, n_best_size=args.n_best_size, max_answer_length=args.max_answer_length, null_score_diff_threshold=args.null_score_diff_threshold, output_dir=args.output_dir, prefix=stage, )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {'''id''': k, '''prediction_text''': v, '''no_answer_probability''': 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{'''id''': k, '''prediction_text''': v} for k, v in predictions.items()]
    references = [{'''id''': ex['''id'''], '''answers''': ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references )
metric = load_metric('''squad_v2''' if args.version_2_with_negative else '''squad''')
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
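# Helper below: size in bytes of one engine binding = volume of its shape x dtype item size.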
def binding_nbytes(binding):
    return trt.volume(engine.get_binding_shape(binding ) ) * engine.get_binding_dtype(binding ).itemsize
# Allocate device memory for inputs and outputs.
d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffers (pinned host memory for fast async copies)
h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
d_output0 = cuda.mem_alloc(h_output0.nbytes)
d_output1 = cuda.mem_alloc(h_output1.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
stream = cuda.Stream()
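# For reference, `model_infer` (defined elsewhere in this script) follows the standard
# PyCUDA/TensorRT pattern -- roughly (a sketch, not the exact implementation):
#   cuda.memcpy_htod_async(d_input, h_input, stream)     # host -> device
#   context.execute_async_v2(bindings, stream.handle)    # run the engine
#   cuda.memcpy_dtoh_async(h_output0, d_output0, stream) # device -> host
#   stream.synchronize()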
# Evaluation
logger.info('''***** Running Evaluation *****''')
logger.info(f" Num examples = {len(eval_dataset)}")
logger.info(f" Batch size = {args.per_device_eval_batch_size}")
total_time = 0.0
niter = 0
start_time = timeit.default_timer()
all_preds = None
for step, batch in enumerate(eval_dataloader):
    outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
    total_time += infer_time
    niter += 1
    start_logits, end_logits = outputs
    start_logits = torch.tensor(start_logits)
    end_logits = torch.tensor(end_logits)
    # necessary to pad predictions and labels for being gathered
    start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
    end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
    logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
    all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
    all_preds = nested_truncate(all_preds, len(eval_dataset))
evalTime = timeit.default_timer() - start_time
logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 1000 / niter))
logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 1000))
logger.info('''Total Number of Inference = %d''', niter)
prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"Evaluation metrics: {eval_metric}")
test_graph = [
[0, 1_6, 1_3, 0, 0, 0],
[0, 0, 1_0, 1_2, 0, 0],
[0, 4, 0, 0, 1_4, 0],
[0, 0, 9, 0, 0, 2_0],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs(graph, s, t, parent) -> bool:
    """Breadth-first search for an augmenting path from s to t."""
    visited = [False] * len(graph )
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def mincut(graph, source, sink) -> list:
    """Edmonds-Karp max flow; returns the edges saturated by the final flow."""
    parent = [-1] * (len(graph ))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Keep a copy of the original capacities.
    while bfs(graph, source, sink, parent ):
        path_flow = float("Inf" )
        s = sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path
            path_flow = min(path_flow, graph[parent[s]][s] )
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph ) ):
        for j in range(len(graph[0] ) ):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j) )
    return res
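# Note: because `bfs` always augments along a shortest path, this is the Edmonds-Karp
# variant of Ford-Fulkerson. The test network above is the classic CLRS example whose
# maximum flow from node 0 to node 5 is 23; `mincut` reports the edges left fully
# saturated, which include the edges crossing the minimum cut.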
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))

import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<unk>" )
        self.assertEqual(vocab_keys[1] , "<s>" )
        self.assertEqual(vocab_keys[-1] , "j" )
        self.assertEqual(len(vocab_keys ) , 1_000 )
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
    def test_padding(self , max_length=15 ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding="max_length" )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding="max_length" )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , s2 , max_length=max_length , padding="max_length" , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding="max_length" )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding="max_length" )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , p2 , max_length=max_length , padding="max_length" , )
def UpperCamelCase ( self ):
pass
    def test_full_tokenizer(self):
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [285, 46, 10, 170, 382] , )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    @cached_property
    def big_tokenizer(self):
        return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment" )
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch
        from transformers import ReformerConfig, ReformerModel
        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys() )[:10]
        sequence = " ".join(first_ten_tokens )
        encoded_sequence = self.big_tokenizer.encode_plus(sequence , return_tensors="pt" )
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="pt" )
        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["input_ids"].shape
        model = ReformerModel(config )
        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence )
            model(**batch_encoded_sequence )
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
"This is a very simple sentence.",
"The quick brown fox jumps over the lazy dog.",
]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name="google/reformer-crime-and-punishment" , revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a" , padding=False , sequences=sequences , )
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
def viz_polymonial() -> None:
    """Plot the data points against the fitted polynomial regression curve."""
    plt.scatter(X , y , color='''red''' )
    plt.plot(X , pol_reg.predict(poly_reg.fit_transform(X ) ) , color='''blue''' )
    plt.title('''Truth or Bluff (Linear Regression)''' )
    plt.xlabel('''Position level''' )
    plt.ylabel('''Salary''' )
    plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polynomial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
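# Equivalent sketch using a scikit-learn Pipeline (assumes only the imports above):
# chaining PolynomialFeatures and LinearRegression avoids calling fit_transform by hand.
#
#   from sklearn.pipeline import make_pipeline
#   model = make_pipeline(PolynomialFeatures(degree=4), LinearRegression())
#   model.fit(X, y)
#   model.predict([[5.5]])  # comparable to pol_reg.predict(poly_reg.fit_transform([[5.5]]))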
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
torch.backends.cuda.matmul.allow_tf32 = False
class VQDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
@property
    def num_embed(self):
return 12
@property
    def num_embeds_ada_norm(self):
return 12
@property
    def text_embedder_hidden_size(self):
return 32
@property
    def dummy_vqvae(self):
torch.manual_seed(0 )
        model = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
    def dummy_text_encoder(self):
torch.manual_seed(0 )
        config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModel(config )
@property
    def dummy_transformer(self):
torch.manual_seed(0 )
        height = 12
        width = 12
        model_kwargs = {
'''attention_bias''': True,
'''cross_attention_dim''': 32,
'''attention_head_dim''': height * width,
'''num_attention_heads''': 1,
'''num_vector_embeds''': self.num_embed,
'''num_embeds_ada_norm''': self.num_embeds_ada_norm,
'''norm_num_groups''': 32,
'''sample_size''': width,
'''activation_fn''': '''geglu-approximate''',
}
        model = Transformer2DModel(**model_kwargs )
return model
    def test_vq_diffusion(self):
        device = '''cpu'''
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed )
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False )
        pipe = VQDiffusionPipeline(
            vqvae=vqvae , text_encoder=text_encoder , tokenizer=tokenizer , transformer=transformer , scheduler=scheduler , learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings , )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        prompt = '''teddy bear playing in the pool'''
        generator = torch.Generator(device=device ).manual_seed(0 )
        output = pipe([prompt] , generator=generator , num_inference_steps=2 , output_type='''np''' )
        image = output.images
        generator = torch.Generator(device=device ).manual_seed(0 )
        image_from_tuple = pipe(
            [prompt] , generator=generator , output_type='''np''' , return_dict=False , num_inference_steps=2 )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
    def test_vq_diffusion_classifier_free_sampling(self):
        device = '''cpu'''
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed )
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
        pipe = VQDiffusionPipeline(
            vqvae=vqvae , text_encoder=text_encoder , tokenizer=tokenizer , transformer=transformer , scheduler=scheduler , learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings , )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        prompt = '''teddy bear playing in the pool'''
        generator = torch.Generator(device=device ).manual_seed(0 )
        output = pipe([prompt] , generator=generator , num_inference_steps=2 , output_type='''np''' )
        image = output.images
        generator = torch.Generator(device=device ).manual_seed(0 )
        image_from_tuple = pipe(
            [prompt] , generator=generator , output_type='''np''' , return_dict=False , num_inference_steps=2 )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy''' )
        pipeline = VQDiffusionPipeline.from_pretrained('''microsoft/vq-diffusion-ithq''' )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device ).manual_seed(0 )
        output = pipeline(
            '''teddy bear playing in the pool''' , num_images_per_prompt=1 , generator=generator , output_type='''np''' , )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image ).max() < 2.0
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        '''latents''',
        '''num_images_per_prompt''',
        '''callback''',
        '''callback_steps''',
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
__lowercase : Any = False
    def get_dummy_components(self) -> dict:
        torch.manual_seed(0 )
        transformer = Transformer2DModel(
            sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=True , activation_fn='''gelu-approximate''' , num_embeds_ada_norm=1000 , norm_type='''ada_norm_zero''' , norm_elementwise_affine=False , )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {'''transformer''': transformer.eval(), '''vae''': vae.eval(), '''scheduler''': scheduler}
        return components
    def get_dummy_inputs(self , device , seed=0 ) -> dict:
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''class_labels''': [1],
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs
    def test_inference(self):
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 16, 16, 3) )
        expected_slice = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1E-3 )
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True , expected_max_diff=1E-3 )
    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_dit_256(self):
        generator = torch.manual_seed(0 )
        pipe = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-256''' )
        pipe.to('''cuda''' )
        words = ['''vase''', '''umbrella''', '''white shark''', '''white wolf''']
        ids = pipe.get_label_ids(words )
        images = pipe(ids , generator=generator , num_inference_steps=40 , output_type='''np''' ).images
        for word, image in zip(words , images ):
            expected_image = load_numpy(
                F'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
            assert np.abs((expected_image - image).max() ) < 1E-2
    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-512''' )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.to('''cuda''' )
        words = ['''vase''', '''umbrella''']
        ids = pipe.get_label_ids(words )
        generator = torch.manual_seed(0 )
        images = pipe(ids , generator=generator , num_inference_steps=25 , output_type='''np''' ).images
        for word, image in zip(words , images ):
            expected_image = load_numpy(
                '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
                F'''/dit/{word}_512.npy''' )
            assert np.abs((expected_image - image).max() ) < 1E-1
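# Minimal usage sketch mirroring the integration tests above (assumes a CUDA device
# and network access to the Hub):
#
#   pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256").to("cuda")
#   class_ids = pipe.get_label_ids(["white shark"])
#   image = pipe(class_ids, num_inference_steps=25, output_type="np").images[0]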
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__a = logging.get_logger(__name__)
__a = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class DebertaV2Config(PretrainedConfig):
    """Configuration class for DeBERTa-v2 models."""
    model_type = '''deberta-v2'''
    def __init__( self , vocab_size=1_2_8_1_0_0 , hidden_size=1_5_3_6 , num_hidden_layers=2_4 , num_attention_heads=2_4 , intermediate_size=6_1_4_4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=0 , initializer_range=0.02 , layer_norm_eps=1e-7 , relative_attention=False , max_relative_positions=-1 , pad_token_id=0 , position_biased_input=True , pos_att_type=None , pooler_dropout=0 , pooler_hidden_act="gelu" , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input
        # Backwards compatibility
        if type(pos_att_type ) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|" )]
        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.pooler_hidden_size = kwargs.get("pooler_hidden_size" , hidden_size )
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
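# Usage note: `pos_att_type` accepts a "|"-separated string for backwards compatibility,
# e.g. DebertaV2Config(pos_att_type="p2c|c2p").pos_att_type == ["p2c", "c2p"].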
class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)] )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)] )
    @property
    def default_onnx_opset( self ) -> int:
        return 1_2
    def generate_dummy_inputs( self , preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , batch_size: int = -1 , seq_length: int = -1 , num_choices: int = -1 , is_pair: bool = False , framework: Optional["TensorType"] = None , num_channels: int = 3 , image_width: int = 4_0 , image_height: int = 4_0 , tokenizer: "PreTrainedTokenizerBase" = None , ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor , framework=framework )
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    def __init__( self, parent, batch_size=13, image_size=10, num_channels=3, patch_size=2, tubelet_size=2, num_frames=2, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, mask_ratio=0.9, scope=None, ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame
        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)
    def prepare_config_and_inputs(self) -> tuple:
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self) -> VideoMAEConfig:
        return VideoMAEConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_frames=self.num_frames, tubelet_size=self.tubelet_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, pixel_values, labels) -> None:
        model = VideoMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_pretraining(self, config, pixel_values, labels) -> None:
        model = VideoMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,))
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
        bool_masked_pos = mask.expand(self.batch_size, -1).bool()
        result = model(pixel_values, bool_masked_pos)
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels))
    def prepare_config_and_inputs_for_common(self) -> tuple:
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"""feature-extraction""": VideoMAEModel, """video-classification""": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self) -> None:
        self.model_tester = VideoMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict:
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,))
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
            bool_masked_pos = mask.expand(self.model_tester.batch_size, -1).bool()
            inputs_dict['bool_masked_pos'] = bool_masked_pos.to(torch_device)
        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict['labels'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def test_config(self) -> None:
        self.config_tester.run_common_tests()
    @unittest.skip(reason='VideoMAE does not use inputs_embeds')
    def test_inputs_embeds(self) -> None:
        pass
    def test_model_common_attributes(self) -> None:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self) -> None:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_pretraining(self) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self) -> None:
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self) -> None:
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            for model_class in self.all_model_classes:
                num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
                seq_len = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )
                inputs_dict['output_attentions'] = True
                inputs_dict['output_hidden_states'] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], )
                out_len = len(outputs)
                # Check attention is always last and order is fine
                inputs_dict['output_attentions'] = True
                inputs_dict['output_hidden_states'] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))
                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], )
    def test_hidden_states_output(self) -> None:
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)
            num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
            seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self) -> None:
        pass
def prepare_video() -> list:
    video_file = hf_hub_download(
        repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
    video = np.load(video_file )
    return list(video )
@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_for_video_classification(self) -> None:
        model = VideoMAEForVideoClassification.from_pretrained('MCG-NJU/videomae-base-finetuned-kinetics').to(
            torch_device)
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 4_00))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
    @slow
    def test_inference_for_pretraining(self) -> None:
        model = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short').to(torch_device)
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors='pt').to(torch_device)
        # add boolean mask, indicating which patches to mask
        local_path = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos', filename='bool_masked_pos.pt')
        inputs['bool_masked_pos'] = torch.load(local_path)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size([1, 14_08, 15_36])
        expected_slice = torch.tensor(
            [[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]], device=torch_device)
        self.assertEqual(outputs.logits.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1E-4))
        # verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.5_1_4_2], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1E-4))
        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short', norm_pix_loss=False).to(
            torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_loss = torch.tensor(torch.tensor([0.6_4_6_9]), device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1E-4))
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """CLIPImageProcessor"""
    tokenizer_class = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
    def __init__( self, image_processor=None, tokenizer=None, **kwargs) -> None:
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.', FutureWarning, )
            feature_extractor = kwargs.pop('feature_extractor')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        super().__init__(image_processor, tokenizer)
    def __call__( self, text=None, images=None, return_tensors=None, **kwargs) -> BatchEncoding:
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode( self, *args, **kwargs) -> list:
        """Forward all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode( self, *args, **kwargs) -> str:
        """Forward all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names( self) -> list:
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    @property
    def feature_extractor_class( self) -> str:
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', FutureWarning, )
        return self.image_processor_class
    @property
    def feature_extractor( self):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.', FutureWarning, )
        return self.image_processor
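# Minimal usage sketch (assumes network access to the Hub and a PIL image `image`):
#
#   from transformers import CLIPProcessor
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)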
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp( self ) -> None:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
        vocab_tokens = dict(zip(vocab ,range(len(vocab ) ) ) )
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
        with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
            fp.write('\n'.join(merges ) )
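        # With these merges, byte-level BPE splits 'lower newer' into
        # ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er'], where '\u0120' marks a
        # leading space; test_full_tokenizer below asserts exactly this.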
    def get_tokenizer( self ,**kwargs ) -> LongformerTokenizer:
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname ,**kwargs )
    def get_rust_tokenizer( self ,**kwargs ) -> LongformerTokenizerFast:
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname ,**kwargs )
    def get_input_output_texts( self ,tokenizer ) -> tuple:
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer( self ) -> None:
        tokenizer = self.tokenizer_class(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
        text = 'lower newer'
        bpe_tokens = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text )  # , add_prefix_space=True)
        self.assertListEqual(tokens ,bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) ,input_bpe_tokens )
    def longformer_dict_integration_testing( self ):
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode('Hello world!' ,add_special_tokens=True ) ,[0, 3_1414, 232, 328, 2] )
        self.assertListEqual(
            tokenizer.encode('Hello world! cécé herlolip 418' ,add_special_tokens=True ) ,[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] ,)
    @slow
    def test_sequence_builders( self ) -> None:
        tokenizer = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' )
        text = tokenizer.encode('sequence builders' ,add_special_tokens=False )
        text_2 = tokenizer.encode('multi-sequence build' ,add_special_tokens=False )
        encoded_text_from_decode = tokenizer.encode(
            'sequence builders' ,add_special_tokens=True ,add_prefix_space=False )
        encoded_pair_from_decode = tokenizer.encode(
            'sequence builders' ,'multi-sequence build' ,add_special_tokens=True ,add_prefix_space=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text ,text_2 )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding( self ) -> None:
        tokenizer = self.get_tokenizer()
        sequence = 'Encode this sequence.'
        space_encoding = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
        # Testing encoder arguments
        encoded = tokenizer.encode(sequence ,add_special_tokens=False ,add_prefix_space=False )
        first_char = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertNotEqual(first_char ,space_encoding )
        encoded = tokenizer.encode(sequence ,add_special_tokens=False ,add_prefix_space=True )
        first_char = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertEqual(first_char ,space_encoding )
        tokenizer.add_special_tokens({'bos_token': '<s>'} )
        encoded = tokenizer.encode(sequence ,add_special_tokens=True )
        first_char = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
        self.assertNotEqual(first_char ,space_encoding )
        # Testing spaces after special tokens
        mask = '<mask>'
        tokenizer.add_special_tokens(
            {'mask_token': AddedToken(mask ,lstrip=True ,rstrip=False )} )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask )
        sequence = 'Encode <mask> sequence'
        sequence_nospace = 'Encode <mask>sequence'
        encoded = tokenizer.encode(sequence )
        mask_loc = encoded.index(mask_ind )
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(first_char ,space_encoding )
        encoded = tokenizer.encode(sequence_nospace )
        mask_loc = encoded.index(mask_ind )
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(first_char ,space_encoding )
    def test_pretokenized_inputs( self ) -> None:
        pass
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_lowercase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase ,**UpperCamelCase )
_lowercase : str = self.tokenizer_class.from_pretrained(UpperCamelCase ,**UpperCamelCase )
_lowercase : Union[str, Any] = 'A, <mask> AllenNLP sentence.'
_lowercase : List[str] = tokenizer_r.encode_plus(UpperCamelCase ,add_special_tokens=UpperCamelCase ,return_token_type_ids=UpperCamelCase )
_lowercase : Optional[int] = tokenizer_p.encode_plus(UpperCamelCase ,add_special_tokens=UpperCamelCase ,return_token_type_ids=UpperCamelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) ,sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) ,sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) ,)
_lowercase : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
_lowercase : Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['input_ids'] ,[0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] ,[0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
UpperCamelCase ,['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
UpperCamelCase ,['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
    def test_change_add_prefix_space_and_trim_offsets_args( self ) -> None:
        for trim_offsets, add_prefix_space in itertools.product([True, False] ,repeat=2 ):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname ,use_fast=True ,add_prefix_space=add_prefix_space ,trim_offsets=trim_offsets )
            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
            self.assertEqual(pre_tokenizer_state['add_prefix_space'] ,add_prefix_space )
            self.assertEqual(post_processor_state['add_prefix_space'] ,add_prefix_space )
            self.assertEqual(post_processor_state['trim_offsets'] ,trim_offsets )
def _lowerCamelCase ( self : List[str] ) -> List[Any]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_lowercase : List[Any] = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
_lowercase : Optional[Any] = F'''{text_of_1_token} {text_of_1_token}'''
_lowercase : List[str] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase ,use_fast=UpperCamelCase ,add_prefix_space=UpperCamelCase ,trim_offsets=UpperCamelCase )
_lowercase : Tuple = tokenizer_r(UpperCamelCase ,return_offsets_mapping=UpperCamelCase ,add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] ,(0, len(UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] ,(len(UpperCamelCase ) + 1, len(UpperCamelCase ) + 1 + len(UpperCamelCase )) ,)
_lowercase : List[str] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase ,use_fast=UpperCamelCase ,add_prefix_space=UpperCamelCase ,trim_offsets=UpperCamelCase )
_lowercase : Any = tokenizer_r(UpperCamelCase ,return_offsets_mapping=UpperCamelCase ,add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] ,(0, len(UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] ,(len(UpperCamelCase ) + 1, len(UpperCamelCase ) + 1 + len(UpperCamelCase )) ,)
_lowercase : str = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase ,use_fast=UpperCamelCase ,add_prefix_space=UpperCamelCase ,trim_offsets=UpperCamelCase )
_lowercase : Any = tokenizer_r(UpperCamelCase ,return_offsets_mapping=UpperCamelCase ,add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] ,(0, len(UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] ,(len(UpperCamelCase ), len(UpperCamelCase ) + 1 + len(UpperCamelCase )) ,)
_lowercase : List[str] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase ,use_fast=UpperCamelCase ,add_prefix_space=UpperCamelCase ,trim_offsets=UpperCamelCase )
_lowercase : Optional[Any] = tokenizer_r(UpperCamelCase ,return_offsets_mapping=UpperCamelCase ,add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] ,(0, len(UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] ,(len(UpperCamelCase ), len(UpperCamelCase ) + 1 + len(UpperCamelCase )) ,)
            _lowercase : Optional[int] = F''' {text_of_1_token} {text_of_1_token}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
_lowercase : Any = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase ,use_fast=UpperCamelCase ,add_prefix_space=UpperCamelCase ,trim_offsets=UpperCamelCase )
_lowercase : Optional[Any] = tokenizer_r(UpperCamelCase ,return_offsets_mapping=UpperCamelCase ,add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] ,(1, 1 + len(UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] ,(1 + len(UpperCamelCase ) + 1, 1 + len(UpperCamelCase ) + 1 + len(UpperCamelCase )) ,)
_lowercase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase ,use_fast=UpperCamelCase ,add_prefix_space=UpperCamelCase ,trim_offsets=UpperCamelCase )
_lowercase : Optional[Any] = tokenizer_r(UpperCamelCase ,return_offsets_mapping=UpperCamelCase ,add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] ,(0, 1 + len(UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] ,(1 + len(UpperCamelCase ), 1 + len(UpperCamelCase ) + 1 + len(UpperCamelCase )) ,)
_lowercase : List[Any] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase ,use_fast=UpperCamelCase ,add_prefix_space=UpperCamelCase ,trim_offsets=UpperCamelCase )
_lowercase : int = tokenizer_r(UpperCamelCase ,return_offsets_mapping=UpperCamelCase ,add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] ,(0, 1 + len(UpperCamelCase )) )
self.assertEqual(
            encoding.offset_mapping[1] ,(1 + len(UpperCamelCase ), 1 + len(UpperCamelCase ) + 1 + len(UpperCamelCase )) ,)
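# --- Usage sketch (illustrative, not part of the test suite above) ---
# A minimal demonstration of the `add_prefix_space` / `trim_offsets` interplay the
# tests exercise, assuming network access to fetch `roberta-base`; the checkpoint
# name is an assumption for illustration only.
def _offsets_demo():
    from transformers import RobertaTokenizerFast

    tok = RobertaTokenizerFast.from_pretrained(
        "roberta-base", add_prefix_space=True, trim_offsets=False
    )
    enc = tok("hello hello", return_offsets_mapping=True, add_special_tokens=False)
    # With trim_offsets=False, the second token's offset keeps its leading space,
    # matching the (len, len + 1 + len) pattern asserted above.
    print(enc.offset_mapping)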
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
# TODO Update this
A = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase__ : str = "esm"
def __init__( self : str ,UpperCamelCase : Tuple=None ,UpperCamelCase : Union[str, Any]=None ,UpperCamelCase : str=None ,UpperCamelCase : str=768 ,UpperCamelCase : List[str]=12 ,UpperCamelCase : Dict=12 ,UpperCamelCase : Any=3072 ,UpperCamelCase : List[str]=0.1 ,UpperCamelCase : int=0.1 ,UpperCamelCase : int=1026 ,UpperCamelCase : int=0.0_2 ,UpperCamelCase : Optional[Any]=1e-12 ,UpperCamelCase : str="absolute" ,UpperCamelCase : Tuple=True ,UpperCamelCase : int=None ,UpperCamelCase : Union[str, Any]=False ,UpperCamelCase : Tuple=False ,UpperCamelCase : Optional[int]=None ,UpperCamelCase : Any=None ,**UpperCamelCase : Dict ,) -> str:
super().__init__(pad_token_id=UpperCamelCase ,mask_token_id=UpperCamelCase ,**UpperCamelCase )
_lowercase : Any = vocab_size
_lowercase : List[Any] = hidden_size
_lowercase : Union[str, Any] = num_hidden_layers
_lowercase : Tuple = num_attention_heads
_lowercase : Optional[int] = intermediate_size
_lowercase : List[Any] = hidden_dropout_prob
_lowercase : Optional[int] = attention_probs_dropout_prob
_lowercase : str = max_position_embeddings
_lowercase : List[str] = initializer_range
_lowercase : Any = layer_norm_eps
_lowercase : Optional[int] = position_embedding_type
_lowercase : int = use_cache
_lowercase : Dict = emb_layer_norm_before
_lowercase : Optional[int] = token_dropout
_lowercase : str = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('No esmfold_config supplied for folding model, using default values.' )
_lowercase : str = EsmFoldConfig()
elif isinstance(UpperCamelCase ,UpperCamelCase ):
_lowercase : Tuple = EsmFoldConfig(**UpperCamelCase )
_lowercase : str = esmfold_config
if vocab_list is None:
logger.warning('No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!' )
_lowercase : Optional[int] = get_default_vocab_list()
else:
_lowercase : Optional[Any] = vocab_list
else:
_lowercase : Any = None
_lowercase : List[Any] = None
if self.esmfold_config is not None and getattr(self.esmfold_config ,'use_esm_attn_map' ,UpperCamelCase ):
raise ValueError('The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!' )
def _lowerCamelCase ( self : str ) -> Tuple:
_lowercase : List[str] = super().to_dict()
if isinstance(self.esmfold_config ,UpperCamelCase ):
_lowercase : Union[str, Any] = self.esmfold_config.to_dict()
return output
@dataclass
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
lowerCAmelCase__ : str = None
lowerCAmelCase__ : bool = True
lowerCAmelCase__ : bool = False
lowerCAmelCase__ : bool = False
lowerCAmelCase__ : bool = False
lowerCAmelCase__ : float = 0
lowerCAmelCase__ : bool = True
lowerCAmelCase__ : bool = False
lowerCAmelCase__ : int = 128
lowerCAmelCase__ : "TrunkConfig" = None
def _lowerCamelCase ( self : List[Any] ) -> str:
if self.trunk is None:
_lowercase : Optional[Any] = TrunkConfig()
elif isinstance(self.trunk ,UpperCamelCase ):
_lowercase : List[str] = TrunkConfig(**self.trunk )
def _lowerCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
_lowercase : Any = asdict(self )
_lowercase : Tuple = self.trunk.to_dict()
return output
@dataclass
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
lowerCAmelCase__ : int = 48
lowerCAmelCase__ : int = 1_024
lowerCAmelCase__ : int = 128
lowerCAmelCase__ : int = 32
lowerCAmelCase__ : int = 32
lowerCAmelCase__ : int = 32
lowerCAmelCase__ : float = 0
lowerCAmelCase__ : float = 0
lowerCAmelCase__ : bool = False
lowerCAmelCase__ : int = 4
lowerCAmelCase__ : Optional[int] = 128
lowerCAmelCase__ : "StructureModuleConfig" = None
def _lowerCamelCase ( self : Dict ) -> Optional[Any]:
if self.structure_module is None:
_lowercase : Any = StructureModuleConfig()
elif isinstance(self.structure_module ,UpperCamelCase ):
_lowercase : int = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''' )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'
                F''' {self.sequence_state_dim} and {self.sequence_head_width}.''' )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'
                F''' {self.pairwise_state_dim} and {self.pairwise_head_width}.''' )
_lowercase : Any = self.sequence_state_dim // self.sequence_head_width
_lowercase : Tuple = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
                '`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got'
F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
                '`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got'
F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
if self.dropout >= 0.4:
raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )
def _lowerCamelCase ( self : List[Any] ) -> str:
_lowercase : int = asdict(self )
_lowercase : Any = self.structure_module.to_dict()
return output
@dataclass
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
lowerCAmelCase__ : int = 384
lowerCAmelCase__ : int = 128
lowerCAmelCase__ : int = 16
lowerCAmelCase__ : int = 128
lowerCAmelCase__ : int = 12
lowerCAmelCase__ : int = 4
lowerCAmelCase__ : int = 8
lowerCAmelCase__ : float = 0.1
lowerCAmelCase__ : int = 8
lowerCAmelCase__ : int = 1
lowerCAmelCase__ : int = 2
lowerCAmelCase__ : int = 7
lowerCAmelCase__ : int = 10
lowerCAmelCase__ : float = 1e-8
lowerCAmelCase__ : float = 1e5
def _lowerCamelCase ( self : List[str] ) -> Union[str, Any]:
return asdict(self )
def SCREAMING_SNAKE_CASE ( ) -> List[str]:
'''simple docstring'''
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
    )
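# --- Worked check (illustrative) ---
# A standalone sketch of the dimension rule the TrunkConfig validation above
# enforces: state_dim must equal num_heads * head_width. The numbers mirror the
# class defaults (sequence_state_dim=1024, sequence_head_width=32).
def _check_head_dims(state_dim: int = 1024, head_width: int = 32) -> int:
    num_heads = state_dim // head_width
    assert state_dim == num_heads * head_width, "state_dim must be a multiple of head_width"
    return num_heads

assert _check_head_dims() == 32  # 1024 splits into 32 heads of width 32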
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
'''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''',
'''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''',
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''',
'''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''',
'''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''',
'''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''',
'''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''',
'''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''',
'''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''',
'''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''',
'''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''',
'''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''',
}
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A = "codegen"
A = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__(self , _UpperCAmelCase=5_0_4_0_0 , _UpperCAmelCase=2_0_4_8 , _UpperCAmelCase=2_0_4_8 , _UpperCAmelCase=4_0_9_6 , _UpperCAmelCase=2_8 , _UpperCAmelCase=1_6 , _UpperCAmelCase=6_4 , _UpperCAmelCase=None , _UpperCAmelCase="gelu_new" , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=1E-5 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , _UpperCAmelCase=5_0_2_5_6 , _UpperCAmelCase=5_0_2_5_6 , _UpperCAmelCase=False , **_UpperCAmelCase , ) -> Union[str, Any]:
__UpperCamelCase : Tuple = vocab_size
__UpperCamelCase : int = n_ctx
__UpperCamelCase : Any = n_positions
__UpperCamelCase : int = n_embd
__UpperCamelCase : List[str] = n_layer
__UpperCamelCase : List[Any] = n_head
__UpperCamelCase : Tuple = n_inner
__UpperCamelCase : Dict = rotary_dim
__UpperCamelCase : List[Any] = activation_function
__UpperCamelCase : Tuple = resid_pdrop
__UpperCamelCase : List[Any] = embd_pdrop
__UpperCamelCase : Optional[int] = attn_pdrop
__UpperCamelCase : List[Any] = layer_norm_epsilon
__UpperCamelCase : Union[str, Any] = initializer_range
__UpperCamelCase : List[str] = use_cache
__UpperCamelCase : Any = bos_token_id
__UpperCamelCase : Tuple = eos_token_id
super().__init__(
bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , tie_word_embeddings=_UpperCAmelCase , **_UpperCAmelCase )
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , _UpperCAmelCase = "default" , _UpperCAmelCase = None , _UpperCAmelCase = False , ) -> List[str]:
super().__init__(_UpperCAmelCase , task=_UpperCAmelCase , patching_specs=_UpperCAmelCase , use_past=_UpperCAmelCase )
if not getattr(self._config , "pad_token_id" , _UpperCAmelCase ):
# TODO: how to do that better?
__UpperCamelCase : Any = 0
@property
def a_ (self ) -> Mapping[str, Mapping[int, str]]:
__UpperCamelCase : List[str] = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
self.fill_with_past_key_values_(_UpperCAmelCase , direction="inputs" )
__UpperCamelCase : Any = {0: "batch", 1: "past_sequence + sequence"}
else:
__UpperCamelCase : Dict = {0: "batch", 1: "sequence"}
return common_inputs
@property
def a_ (self ) -> int:
return self._config.n_layer
@property
def a_ (self ) -> int:
return self._config.n_head
def a_ (self , _UpperCAmelCase , _UpperCAmelCase = -1 , _UpperCAmelCase = -1 , _UpperCAmelCase = False , _UpperCAmelCase = None , ) -> Mapping[str, Any]:
__UpperCamelCase : Optional[Any] = super(_UpperCAmelCase , self ).generate_dummy_inputs(
_UpperCAmelCase , batch_size=_UpperCAmelCase , seq_length=_UpperCAmelCase , is_pair=_UpperCAmelCase , framework=_UpperCAmelCase )
# We need to order the input in the way they appears in the forward()
__UpperCamelCase : Tuple = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__UpperCamelCase , __UpperCamelCase : Optional[int] = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
__UpperCamelCase : Tuple = seqlen + 2
__UpperCamelCase : Optional[Any] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__UpperCamelCase : Optional[int] = [
(torch.zeros(_UpperCAmelCase ), torch.zeros(_UpperCAmelCase )) for _ in range(self.num_layers )
]
__UpperCamelCase : int = common_inputs["attention_mask"]
if self.use_past:
__UpperCamelCase : List[str] = ordered_inputs["attention_mask"].dtype
__UpperCamelCase : List[Any] = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(_UpperCAmelCase , _UpperCAmelCase , dtype=_UpperCAmelCase )] , dim=1 )
return ordered_inputs
@property
def a_ (self ) -> int:
return 1_3
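# --- Worked example (illustrative) ---
# A standalone sketch of the dummy past_key_values shape built in
# generate_dummy_inputs above: (batch, num_heads, past_seq_len, head_dim),
# where past_seq_len mirrors the "seqlen + 2" used there. The concrete numbers
# are the CodeGen defaults from the config above (n_head=16, n_embd=4096).
def _past_shape(batch: int, seqlen: int, n_head: int, n_embd: int) -> tuple:
    past_key_values_length = seqlen + 2
    return (batch, n_head, past_key_values_length, n_embd // n_head)

assert _past_shape(batch=2, seqlen=8, n_head=16, n_embd=4096) == (2, 16, 10, 256)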
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase = '''▁'''
_lowerCAmelCase = {'''vocab_file''': '''spiece.model'''}
_lowerCAmelCase = {
'''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''}
}
_lowerCAmelCase = {
'''google/pegasus-xsum''': 512,
}
_lowerCAmelCase = logging.get_logger(__name__)
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
    A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = ["input_ids", "attention_mask"]
def __init__(self , _UpperCAmelCase , _UpperCAmelCase="<pad>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="<mask_2>" , _UpperCAmelCase="<mask_1>" , _UpperCAmelCase=None , _UpperCAmelCase=1_0_3 , _UpperCAmelCase = None , **_UpperCAmelCase , ) -> None:
__UpperCamelCase : Union[str, Any] = offset
if additional_special_tokens is not None:
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise TypeError(
f"additional_special_tokens should be of type {type(_UpperCAmelCase )}, but is"
f" {type(_UpperCAmelCase )}" )
            additional_special_tokens_extended = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f"<unk_{i}>" for i in range(len(_UpperCAmelCase ) , self.offset - 1 )
]
if len(set(_UpperCAmelCase ) ) != len(_UpperCAmelCase ):
raise ValueError(
"Please make sure that the provided additional_special_tokens do not contain an incorrectly"
f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}." )
__UpperCamelCase : Dict = additional_special_tokens_extended
else:
__UpperCamelCase : int = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f"<unk_{i}>" for i in range(2 , self.offset )]
__UpperCamelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token_sent=_UpperCAmelCase , offset=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , )
__UpperCamelCase : Union[str, Any] = mask_token_sent
__UpperCamelCase : Optional[int] = vocab_file
__UpperCamelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_UpperCAmelCase )
# add special tokens to encoder dict
__UpperCamelCase : Dict[int, str] = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
__UpperCamelCase : Dict[str, int] = {v: k for k, v in self.encoder.items()}
@property
def a_ (self ) -> int:
return len(self.sp_model ) + self.offset
def a_ (self ) -> Dict[str, int]:
__UpperCamelCase : List[Any] = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__(self ) -> Union[str, Any]:
__UpperCamelCase : Optional[Any] = self.__dict__.copy()
__UpperCamelCase : Dict = None
return state
def __setstate__(self , _UpperCAmelCase ) -> List[Any]:
__UpperCamelCase : List[str] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
__UpperCamelCase : Dict = {}
__UpperCamelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def a_ (self , _UpperCAmelCase ) -> List[str]:
return self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase )
def a_ (self , _UpperCAmelCase ) -> int:
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
__UpperCamelCase : str = self.sp_model.piece_to_id(_UpperCAmelCase )
return sp_id + self.offset
def a_ (self , _UpperCAmelCase ) -> str:
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
__UpperCamelCase : List[Any] = self.sp_model.IdToPiece(index - self.offset )
return token
def a_ (self , _UpperCAmelCase ) -> List[str]:
__UpperCamelCase : Optional[Any] = []
__UpperCamelCase : List[Any] = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_UpperCAmelCase ) + token
__UpperCamelCase : str = []
else:
current_sub_tokens.append(_UpperCAmelCase )
out_string += self.sp_model.decode(_UpperCAmelCase )
return out_string.strip()
def a_ (self , _UpperCAmelCase=False ) -> Optional[int]:
return 1
def a_ (self , _UpperCAmelCase ) -> Dict:
__UpperCamelCase : Any = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def a_ (self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
return self._special_token_mask(_UpperCAmelCase )
elif token_ids_a is None:
return self._special_token_mask(_UpperCAmelCase ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def a_ (self , _UpperCAmelCase , _UpperCAmelCase=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def a_ (self , _UpperCAmelCase , _UpperCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
__UpperCamelCase : str = os.path.join(
_UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCAmelCase , "wb" ) as fi:
__UpperCamelCase : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
return (out_vocab_file,)
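# --- Worked example (illustrative) ---
# A standalone sketch of the id-offset scheme used above: ids 0..offset-1 are
# reserved for special tokens, and SentencePiece ids are shifted up by `offset`
# (default 103, as in the __init__ signature above). The toy values below are
# assumptions for illustration.
def _to_model_id(sp_id: int, offset: int = 103) -> int:
    return sp_id + offset

def _to_sp_id(model_id: int, offset: int = 103) -> int:
    return model_id - offset

assert _to_sp_id(_to_model_id(7)) == 7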
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson( func : str , a : float | Decimal , precision : float = 10**-10 ) -> float:
    x = a
    while True:
        x = Decimal(x ) - (
            Decimal(eval(func ) ) / Decimal(eval(str(diff(func ) ) ) ) # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func ) ) < precision: # noqa: S307
            return float(x )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
# Find root of polynomial
print(F'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}''')
    # Find value of e (the root of log(x) - 1)
print(F'''The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}''')
# Exponential Roots
print(F'''The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}''')
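    # --- Extra worked check (illustrative) ---
    # sqrt(2) as the positive root of x**2 - 2; the tolerance below is an assumption.
    assert abs(newton_raphson('x**2 - 2', 1.5) - 2 ** 0.5) < 1e-8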
"""simple docstring"""
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCAmelCase_ ( _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Optional[int] = BlenderbotSmallTokenizer
_lowerCamelCase: List[Any] = False
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
super().setUp()
A = ['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__']
A = dict(zip(A_ ,range(len(A_ ) ) ) )
A = ['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', '']
A = {'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'}
A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(A_ ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(A_ ) )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,**A_ : Union[str, Any] ) -> Optional[int]:
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Tuple ) -> List[Any]:
A = 'adapt act apte'
A = 'adapt act apte'
return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
A = BlenderbotSmallTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
A = 'adapt act apte'
A = ['adapt', 'act', 'ap@@', 'te']
A = tokenizer.tokenize(A_ )
self.assertListEqual(A_ ,A_ )
A = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
A = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) ,A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
assert tok('sam' ).input_ids == [1384]
A = 'I am a small frog.'
A = tok([src_text] ,padding=A_ ,truncation=A_ )['input_ids']
A = tok.batch_decode(A_ ,skip_special_tokens=A_ ,clean_up_tokenization_spaces=A_ )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
A = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
A = 'I am a small frog .'
A = '.'
A = tok(A_ )['input_ids']
A = tok(A_ )['input_ids']
        assert encoded[-1] == encoded_dot[0]
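# --- Usage sketch (illustrative, outside the test class) ---
# Loading the real checkpoint the slow tests above use; this requires network
# access, so it is an assumption-laden example. The assert repeats the check
# already made in the tests.
def _blenderbot_small_demo():
    from transformers import BlenderbotSmallTokenizer

    tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
    assert tok("sam").input_ids == [1384]  # same check as the test above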
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
a_ = logging.get_logger(__name__)
class _lowerCamelCase ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowerCAmelCase__ : str = "upernet"
def __init__( self : Union[str, Any] , snake_case : int=None , snake_case : Optional[int]=512 , snake_case : Union[str, Any]=0.02 , snake_case : Tuple=[1, 2, 3, 6] , snake_case : str=True , snake_case : Optional[int]=0.4 , snake_case : Dict=384 , snake_case : Optional[int]=256 , snake_case : Union[str, Any]=1 , snake_case : Tuple=False , snake_case : List[str]=255 , **snake_case : Union[str, Any] , ):
super().__init__(**UpperCamelCase__ )
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
__UpperCamelCase = CONFIG_MAPPING['''resnet'''](out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
__UpperCamelCase = backbone_config.get('''model_type''' )
__UpperCamelCase = CONFIG_MAPPING[backbone_model_type]
__UpperCamelCase = config_class.from_dict(UpperCamelCase__ )
__UpperCamelCase = backbone_config
__UpperCamelCase = hidden_size
__UpperCamelCase = initializer_range
__UpperCamelCase = pool_scales
__UpperCamelCase = use_auxiliary_head
__UpperCamelCase = auxiliary_loss_weight
__UpperCamelCase = auxiliary_in_channels
__UpperCamelCase = auxiliary_channels
__UpperCamelCase = auxiliary_num_convs
__UpperCamelCase = auxiliary_concat_input
__UpperCamelCase = loss_ignore_index
def snake_case ( self : int ):
__UpperCamelCase = copy.deepcopy(self.__dict__ )
__UpperCamelCase = self.backbone_config.to_dict()
__UpperCamelCase = self.__class__.model_type
return output
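# --- Usage sketch (illustrative) ---
# Instantiating the config with no backbone falls back to the default ResNet
# backbone built above; `UperNetConfig` is the public name this class maps to
# in `transformers`.
def _upernet_config_demo():
    from transformers import UperNetConfig

    cfg = UperNetConfig()  # default ResNet backbone with stages 1-4 as features
    print(cfg.backbone_config.model_type, cfg.hidden_size)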
import heapq
import sys
import numpy as np
a_ = tuple[int, int]
class _lowerCamelCase :
"""simple docstring"""
def __init__( self : Dict ):
__UpperCamelCase = []
__UpperCamelCase = set()
def snake_case ( self : Dict ):
if not self.empty():
return self.elements[0][0]
else:
return float('''inf''' )
def snake_case ( self : Optional[Any] ):
return len(self.elements ) == 0
def snake_case ( self : Optional[int] , snake_case : Tuple , snake_case : Any ):
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(snake_case )
else:
# update
# print("update", item)
__UpperCamelCase = []
((__UpperCamelCase) , (__UpperCamelCase)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((__UpperCamelCase) , (__UpperCamelCase)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def snake_case ( self : Any , snake_case : int ):
if item in self.set:
self.set.remove(snake_case )
__UpperCamelCase = []
((__UpperCamelCase) , (__UpperCamelCase)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((__UpperCamelCase) , (__UpperCamelCase)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def snake_case ( self : Tuple ):
return self.elements[0][1]
def snake_case ( self : List[str] ):
((__UpperCamelCase) , (__UpperCamelCase)) = heapq.heappop(self.elements )
self.set.remove(snake_case )
return (priority, item)
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Tuple:
"""simple docstring"""
__UpperCamelCase = np.array(lowercase_ )
__UpperCamelCase = np.array(lowercase_ )
return np.linalg.norm(a - b )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> int:
"""simple docstring"""
return consistent_heuristic(lowercase_ , lowercase_ ) // t
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Dict:
"""simple docstring"""
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> str:
"""simple docstring"""
__UpperCamelCase = g_function[start] + Wa * heuristics[i](lowercase_ , lowercase_ )
return ans
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Dict:
"""simple docstring"""
__UpperCamelCase = np.chararray((n, n) )
for i in range(lowercase_ ):
for j in range(lowercase_ ):
__UpperCamelCase = '''*'''
for i in range(lowercase_ ):
for j in range(lowercase_ ):
if (j, (n - 1) - i) in blocks:
__UpperCamelCase = '''#'''
__UpperCamelCase = '''-'''
__UpperCamelCase = back_pointer[goal]
while x != start:
((__UpperCamelCase) , (__UpperCamelCase)) = x
# print(x)
__UpperCamelCase = '''-'''
__UpperCamelCase = back_pointer[x]
__UpperCamelCase = '''-'''
for i in range(lowercase_ ):
for j in range(lowercase_ ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=''' ''' )
print('''<-- End position''' , end=''' ''' )
else:
print(grid[i][j] , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
print('''PATH TAKEN BY THE ALGORITHM IS:-''' )
__UpperCamelCase = back_pointer[goal]
while x != start:
print(lowercase_ , end=''' ''' )
__UpperCamelCase = back_pointer[x]
print(lowercase_ )
sys.exit()
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[Any]:
"""simple docstring"""
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Tuple:
"""simple docstring"""
for itera in range(lowercase_ ):
open_list[itera].remove_element(lowercase_ )
# print("s", s)
# print("j", j)
((__UpperCamelCase) , (__UpperCamelCase)) = s
__UpperCamelCase = (x - 1, y)
__UpperCamelCase = (x + 1, y)
__UpperCamelCase = (x, y + 1)
__UpperCamelCase = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(lowercase_ ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(lowercase_ )
__UpperCamelCase = -1
__UpperCamelCase = float('''inf''' )
if valid(lowercase_ ) and g_function[neighbours] > g_function[s] + 1:
__UpperCamelCase = g_function[s] + 1
__UpperCamelCase = s
if neighbours not in close_list_anchor:
open_list[0].put(lowercase_ , key(lowercase_ , 0 , lowercase_ , lowercase_ ) )
if neighbours not in close_list_inad:
for var in range(1 , lowercase_ ):
if key(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) <= Wa * key(
lowercase_ , 0 , lowercase_ , lowercase_ ):
open_list[j].put(
lowercase_ , key(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) )
def __SCREAMING_SNAKE_CASE ( ) -> str:
"""simple docstring"""
__UpperCamelCase = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
a_ = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
a_ = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
a_ = make_common_ground()
a_ = blocks_blk
# hyper parameters
a_ = 1
a_ = 1
a_ = 20
a_ = 3 # one consistent and two other inconsistent
# start and end destination
a_ = (0, 0)
a_ = (n - 1, n - 1)
a_ = 1
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> str:
"""simple docstring"""
__UpperCamelCase = {start: 0, goal: float('''inf''' )}
__UpperCamelCase = {start: -1, goal: -1}
__UpperCamelCase = []
__UpperCamelCase = set()
for i in range(lowercase_ ):
open_list.append(PriorityQueue() )
open_list[i].put(lowercase_ , key(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) )
__UpperCamelCase = []
__UpperCamelCase = []
while open_list[0].minkey() < float('''inf''' ):
for i in range(1 , lowercase_ ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('''inf''' ):
do_something(lowercase_ , lowercase_ , lowercase_ )
else:
__UpperCamelCase , __UpperCamelCase = open_list[i].top_show()
visited.add(lowercase_ )
expand_state(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , )
close_list_inad.append(lowercase_ )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('''inf''' ):
do_something(lowercase_ , lowercase_ , lowercase_ )
else:
__UpperCamelCase = open_list[0].top_show()
visited.add(lowercase_ )
expand_state(
lowercase_ , 0 , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , )
close_list_anchor.append(lowercase_ )
print('''No path found to goal''' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(lowercase_ ):
if (j, i) in blocks:
print('''#''' , end=''' ''' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('''*''' , end=''' ''' )
else:
print('''-''' , end=''' ''' )
else:
print('''*''' , end=''' ''' )
if (j, i) == (n - 1, n - 1):
print('''<-- End position''' , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
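# --- Worked example (illustrative) ---
# A standalone sketch of the priority rule used above for Multi-Heuristic A*:
# key(s, i) = g(s) + W1 * h_i(s), and an inadmissible queue is expanded only
# while its minimum key stays within W2 times the anchor's. W1 = W2 = 1 here,
# matching the hyperparameters set above.
def _mha_key(g: float, h: float, w1: float = 1.0) -> float:
    return g + w1 * h

assert _mha_key(g=3.0, h=4.0) == 7.0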
def move_tower( height : int , from_pole : str , to_pole : str , with_pole : str ):
    if height >= 1:
        move_tower(height - 1 , from_pole , with_pole , to_pole )
        move_disk(from_pole , to_pole )
        move_tower(height - 1 , with_pole , to_pole , from_pole )
def move_disk( from_pole : str , to_pole : str ):
    print('''moving disk from''' , from_pole , '''to''' , to_pole )
def main( ):
    height = int(input('''Height of hanoi: ''' ).strip() )
    move_tower(height , '''A''' , '''B''' , '''C''' )
if __name__ == "__main__":
main()
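# --- Worked check (illustrative) ---
# Towers of Hanoi needs 2**n - 1 moves; count them with a local stand-in for
# move_disk so nothing is printed.
def _count_moves(height: int) -> int:
    moves = []
    def _record(fr, to):  # stand-in for move_disk
        moves.append((fr, to))
    def _tower(h, fr, to, via):
        if h >= 1:
            _tower(h - 1, fr, via, to)
            _record(fr, to)
            _tower(h - 1, via, to, fr)
    _tower(height, "A", "B", "C")
    return len(moves)

assert _count_moves(4) == 2 ** 4 - 1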
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
lowercase_ : Tuple = TypeVar('T')
class SegmentTree ( Generic[T] ):
    def __init__( self , arr : list[T] , fnc : Callable[[T, T], T] ) -> None:
        any_type: Any | T = None
        self.N : int = len(arr )
        self.st : list[T] = [any_type for _ in range(self.N )] + arr
        self.fn = fnc
        self.build()
    def build( self ) -> None:
        for p in range(self.N - 1 , 0 , -1 ):
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
    def update( self , p : int , v : T ) -> None:
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
    def query( self , l : int , r : int ) -> T | None: # noqa: E741
        l , r = l + self.N , r + self.N
        res : T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res , self.st[l] )
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res , self.st[r] )
            l , r = (l + 1) // 2 , (r - 1) // 2
        return res
if __name__ == "__main__":
from functools import reduce
lowercase_ : str = [1, 1_0, -2, 9, -3, 8, 4, -7, 5, 6, 1_1, -1_2]
lowercase_ : str = {
0: 7,
1: 2,
2: 6,
3: -1_4,
4: 5,
5: 4,
6: 7,
7: -1_0,
8: 9,
9: 1_0,
1_0: 1_2,
1_1: 1,
}
lowercase_ : int = SegmentTree(test_array, min)
lowercase_ : Optional[int] = SegmentTree(test_array, max)
lowercase_ : Optional[Any] = SegmentTree(test_array, lambda a, b: a + b)
def test_all_segments( ):
    for i in range(len(test_array ) ):
        for j in range(i , len(test_array ) ):
            min_range = reduce(min , test_array[i : j + 1] )
            max_range = reduce(max , test_array[i : j + 1] )
            sum_range = reduce(lambda a , b : a + b , test_array[i : j + 1] )
            assert min_range == min_segment_tree.query(i , j )
            assert max_range == max_segment_tree.query(i , j )
            assert sum_range == sum_segment_tree.query(i , j )
test_all_segments()
for index, value in test_updates.items():
lowercase_ : int = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
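    # --- Extra worked check (illustrative) ---
    # After the updates above, positions 0..2 hold 7, 2, 6, so the range
    # minimum over [0, 2] is 2.
    assert min_segment_tree.query(0, 2) == 2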
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : int , *__a : Dict , **__a : Optional[Any] ) -> None:
"""simple docstring"""
warnings.warn(
"""The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use BeitImageProcessor instead.""" , __a , )
        super().__init__(*__a , **__a )
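# --- Usage sketch (illustrative) ---
# The class above is a thin deprecation shim over `BeitImageProcessor`; new
# code should instantiate the processor directly.
def _beit_processor_demo():
    from transformers import BeitImageProcessor

    processor = BeitImageProcessor()  # default resize/normalization settings
    print(type(processor).__name__)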
from __future__ import annotations
def maximum_non_adjacent_sum( nums : list[int] ):
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including , max_excluding = (
            max_excluding + num,
            max(max_including , max_excluding ),
        )
    return max(max_including , max_excluding )
if __name__ == "__main__":
import doctest
    doctest.testmod()
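    # --- Worked example (illustrative) ---
    # For [1, 2, 3, 4] the best non-adjacent picks are 2 and 4, so the sum is 6.
    assert maximum_non_adjacent_sum([1, 2, 3, 4]) == 6
    assert maximum_non_adjacent_sum([]) == 0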
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__A =logging.get_logger(__name__)
__A ={'''vocab_file''': '''sentencepiece.bpe.model'''}
__A ={
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
}
__A ={
'''moussaKam/mbarthez''': 1_0_2_4,
'''moussaKam/barthez''': 1_0_2_4,
'''moussaKam/barthez-orangesum-title''': 1_0_2_4,
}
__A ='''▁'''
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ['input_ids', 'attention_mask']
def __init__( self , lowercase , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , lowercase = None , **lowercase , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase_ = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else mask_token
lowerCamelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase_ , )
lowerCamelCase_ = vocab_file
lowerCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCamelCase_ ) )
lowerCamelCase_ = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
lowerCamelCase_ = len(self.sp_model ) - 1
lowerCamelCase_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
lowerCamelCase_ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None , lowercase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase_ )) + [1]
return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1]
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> List[int]:
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
return len(self.sp_model )
def SCREAMING_SNAKE_CASE_( self ) -> int:
lowerCamelCase_ = {self.convert_ids_to_tokens(lowerCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> List[str]:
return self.sp_model.encode(lowerCamelCase_ , out_type=lowerCamelCase_ )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> List[str]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCamelCase_ = self.sp_model.PieceToId(lowerCamelCase_ )
return spm_id if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> List[Any]:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(lowerCamelCase_ )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Any:
lowerCamelCase_ = []
lowerCamelCase_ = ""
lowerCamelCase_ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCamelCase_ ) + token
lowerCamelCase_ = True
lowerCamelCase_ = []
else:
current_sub_tokens.append(lowerCamelCase_ )
lowerCamelCase_ = False
out_string += self.sp_model.decode(lowerCamelCase_ )
return out_string.strip()
def __getstate__( self ) -> int:
lowerCamelCase_ = self.__dict__.copy()
lowerCamelCase_ = None
return state
def __setstate__( self , lowercase ) -> Tuple:
lowerCamelCase_ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCamelCase_ = {}
lowerCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> Tuple[str]:
if not os.path.isdir(lowerCamelCase_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCamelCase_ = os.path.join(
lowerCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase_ , "wb" ) as fi:
lowerCamelCase_ = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase_ )
return (out_vocab_file,)
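# --- Usage sketch (illustrative) ---
# Loading the tokenizer from the hub requires network access; the checkpoint
# name is one of the entries mapped above.
def _barthez_demo():
    from transformers import BarthezTokenizer

    tok = BarthezTokenizer.from_pretrained("moussaKam/barthez")
    print(tok.tokenize("Bonjour le monde"))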
"""simple docstring"""
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
UpperCAmelCase ={
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
def _A ( _a : str ):
"""simple docstring"""
A = list(s_dict.keys() )
for key in keys:
A = r""".*/layers_(\d+)"""
A = key
if re.match(_a , _a ):
A = re.sub(r"""layers_(\d+)""" , r"""block/\1/layer""" , _a )
A = r"""(encoder|decoder)\/"""
if re.match(_a , _a ):
A = re.match(_a , _a ).groups()
if groups[0] == "encoder":
A = re.sub(r"""/mlp/""" , r"""/1/mlp/""" , _a )
A = re.sub(r"""/pre_mlp_layer_norm/""" , r"""/1/layer_norm/""" , _a )
elif groups[0] == "decoder":
A = re.sub(r"""/mlp/""" , r"""/2/mlp/""" , _a )
A = re.sub(r"""/pre_mlp_layer_norm/""" , r"""/2/layer_norm/""" , _a )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
A = new_key.replace(_a , _a )
print(f'{key} -> {new_key}' )
A = s_dict.pop(_a )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
A = s_dict[
"""encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
A = s_dict[
"""decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts ):
                s_dict[key.replace("expert/" , f"experts/expert_{idx}/" )] = expert_weights[idx]
                print(f'{key} -> {key.replace("expert/" , f"experts/expert_{idx}/" )}' )
            s_dict.pop(key )
return s_dict
UpperCAmelCase ={
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def _A ( _a : Dict , _a : int ):
"""simple docstring"""
import regex as re
with open(_a , """r""" ) as f:
A = f.read()
A = re.findall(r"""(.*) = ([0-9.]*)""" , _a )
A = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
A = float(_a ) if """.""" in value else int(_a )
A = re.findall(r"""(.*activations) = \(\'(.*)\',\)""" , _a )[0]
A = str(activation[1] )
A = num_experts
A = SwitchTransformersConfig(**_a )
return config
def _A ( _a : Union[str, Any] , _a : Dict , _a : Optional[Any]=None , _a : Dict="./" , _a : Dict=8 ):
"""simple docstring"""
print(f'Loading flax weights from : {flax_checkpoint_path}' )
A = checkpoints.load_tax_checkpoint(_a )
if gin_file is not None:
A = convert_gin_to_config(_a , _a )
else:
A = SwitchTransformersConfig.from_pretrained(_a )
A = SwitchTransformersForConditionalGeneration(_a )
A = flax_params["""target"""]
A = flatten_dict(_a , sep="""/""" )
A = rename_keys(_a )
A = unflatten_dict(_a , sep="""/""" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(_a , _a )
print(f'Save PyTorch model to {pytorch_dump_path}' )
pt_model.save_pretrained(_a )
if __name__ == "__main__":
UpperCAmelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
" model architecture. If not provided, a `gin_file` has to be provided."
),
)
parser.add_argument(
"--gin_file",
default=None,
type=str,
required=False,
help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
)
parser.add_argument(
"--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
)
parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
UpperCAmelCase =parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_tax_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
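# --- Worked example (illustrative) ---
# A standalone sketch of the layer-renaming regex applied in the key-renaming
# function above; the sample key is an assumption for illustration.
def _rename_demo():
    import re

    key = "encoder/layers_3/attention/query/kernel"
    renamed = re.sub(r"layers_(\d+)", r"block/\1/layer", key)
    assert renamed == "encoder/block/3/layer/attention/query/kernel"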
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class a__ ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] )
lowerCAmelCase__ = get_activation('''gelu''' )
self.assertTrue(torch.allclose(gelu_python(lowerCamelCase_ ) , torch_builtin(lowerCamelCase_ ) ) )
self.assertFalse(torch.allclose(gelu_python(lowerCamelCase_ ) , gelu_new(lowerCamelCase_ ) ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] )
lowerCAmelCase__ = get_activation('''gelu''' )
lowerCAmelCase__ = get_activation('''gelu_10''' )
lowerCAmelCase__ = torch_builtin(lowerCamelCase_ )
lowerCAmelCase__ = geluaa(lowerCamelCase_ )
lowerCAmelCase__ = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
self.assertTrue(torch.max(lowerCamelCase_ ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
get_activation('''gelu''' )
get_activation('''gelu_10''' )
get_activation('''gelu_fast''' )
get_activation('''gelu_new''' )
get_activation('''gelu_python''' )
get_activation('''gelu_pytorch_tanh''' )
get_activation('''linear''' )
get_activation('''mish''' )
get_activation('''quick_gelu''' )
get_activation('''relu''' )
get_activation('''sigmoid''' )
get_activation('''silu''' )
get_activation('''swish''' )
get_activation('''tanh''' )
with self.assertRaises(lowerCamelCase_ ):
get_activation('''bogus''' )
with self.assertRaises(lowerCamelCase_ ):
get_activation(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = get_activation('''gelu''' )
lowerCAmelCase__ = 1
lowerCAmelCase__ = get_activation('''gelu''' )
self.assertEqual(acta.a , 1 )
with self.assertRaises(lowerCamelCase_ ):
            lowerCAmelCase__ = acta.a
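# --- Usage sketch (illustrative) ---
# Fetching an activation by name outside the test harness; `get_activation`
# returns a torch.nn.Module, so it can be called on tensors directly.
def _activation_demo():
    import torch
    from transformers.activations import get_activation

    act = get_activation("gelu")
    print(act(torch.tensor([-1.0, 0.0, 1.0])))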
'''simple docstring'''
import doctest
from collections import deque
import numpy as np
class a__ :
'''simple docstring'''
def __init__( self ) -> None:
lowerCAmelCase__ = [2, 1, 2, -1]
lowerCAmelCase__ = [1, 2, 3, 4]
def __SCREAMING_SNAKE_CASE ( self ) -> list[float]:
lowerCAmelCase__ = len(self.first_signal )
lowerCAmelCase__ = len(self.second_signal )
lowerCAmelCase__ = max(lowerCamelCase_ , lowerCamelCase_ )
# create a zero matrix of max_length x max_length
lowerCAmelCase__ = [[0] * max_length for i in range(lowerCamelCase_ )]
# fills the smaller signal with zeros to make both signals of same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(lowerCamelCase_ ):
lowerCAmelCase__ = deque(self.second_signal )
rotated_signal.rotate(lowerCamelCase_ )
for j, item in enumerate(lowerCamelCase_ ):
matrix[i][j] += item
# multiply the matrix with the first signal
lowerCAmelCase__ = np.matmul(np.transpose(lowerCamelCase_ ) , np.transpose(self.first_signal ) )
# rounding-off to two decimal places
return [round(lowerCamelCase_ , 2 ) for i in final_signal]
if __name__ == "__main__":
    doctest.testmod()
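    # --- Worked cross-check (illustrative) ---
    # Circular convolution via the FFT identity ifft(fft(x) * fft(h)); for the
    # default signals [2, 1, 2, -1] and [1, 2, 3, 4] this gives [10, 10, 6, 14],
    # matching the matrix method implemented above.
    x = np.array([2, 1, 2, -1], dtype=float)
    h = np.array([1, 2, 3, 4], dtype=float)
    y = np.real(np.fft.ifft(np.fft.fft(x) * np.fft.fft(h)))
    assert np.allclose(y, [10, 10, 6, 14])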
from ..utils import DummyObject, requires_backends
class A ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''torch''', '''transformers''', '''onnx''']

    def __init__(self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ["""torch""", """transformers""", """onnx"""] )

    @classmethod
    def from_config(cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )

    @classmethod
    def from_pretrained(cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )


class A ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''torch''', '''transformers''', '''onnx''']

    def __init__(self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ["""torch""", """transformers""", """onnx"""] )

    @classmethod
    def from_config(cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )

    @classmethod
    def from_pretrained(cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )


class A ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''torch''', '''transformers''', '''onnx''']

    def __init__(self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ["""torch""", """transformers""", """onnx"""] )

    @classmethod
    def from_config(cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )

    @classmethod
    def from_pretrained(cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )


class A ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''torch''', '''transformers''', '''onnx''']

    def __init__(self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ["""torch""", """transformers""", """onnx"""] )

    @classmethod
    def from_config(cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )

    @classmethod
    def from_pretrained(cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )


class A ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''torch''', '''transformers''', '''onnx''']

    def __init__(self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ["""torch""", """transformers""", """onnx"""] )

    @classmethod
    def from_config(cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )

    @classmethod
    def from_pretrained(cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )


class A ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''torch''', '''transformers''', '''onnx''']

    def __init__(self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ["""torch""", """transformers""", """onnx"""] )

    @classmethod
    def from_config(cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )

    @classmethod
    def from_pretrained(cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
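# Context note (a sketch, not from the original file): each dummy class above is a
# stand-in for a pipeline that needs `torch`, `transformers` and `onnx`; calling
#   A()
# without those packages installed raises an ImportError naming the missing
# backends. In the real library each stub has a distinct pipeline name; here the
# repeated name `A` means only the last definition survives at import time.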
| 15 |
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)
def list_field( default=None , metadata=None ):
    """simple docstring"""
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class SCREAMING_SNAKE_CASE :
    models: List[str] = list_field(
        default=[] , metadata={
            'help': (
                'Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version'
                ' of all available models'
            )
        } , )
    batch_sizes: List[int] = list_field(
        default=[8] , metadata={'help': 'List of batch sizes for which memory and time performance will be evaluated'} )
    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512] , metadata={'help': 'List of sequence lengths for which memory and time performance will be evaluated'} , )
    inference: bool = field(
        default=True , metadata={'help': 'Whether to benchmark inference of model. Inference can be disabled via --no-inference.'} , )
    cuda: bool = field(
        default=True , metadata={'help': 'Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'} , )
    tpu: bool = field(
        default=True , metadata={'help': 'Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'} )
    fp16: bool = field(default=False , metadata={'help': 'Use FP16 to accelerate inference.'} )
    training: bool = field(default=False , metadata={'help': 'Benchmark training of model'} )
    verbose: bool = field(default=False , metadata={'help': 'Verbose memory tracing'} )
    speed: bool = field(
        default=True , metadata={'help': 'Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'} , )
    memory: bool = field(
        default=True , metadata={
            'help': 'Whether to perform memory measurements. Memory measurements can be disabled via --no-memory'
        } , )
    trace_memory_line_by_line: bool = field(default=False , metadata={'help': 'Trace memory line by line'} )
    save_to_csv: bool = field(default=False , metadata={'help': 'Save result to a CSV file'} )
    log_print: bool = field(default=False , metadata={'help': 'Save all print statements in a log file'} )
    env_print: bool = field(default=False , metadata={'help': 'Whether to print environment information'} )
    multi_process: bool = field(
        default=True , metadata={
            'help': (
                'Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use'
                ' multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled'
                ' for debugging / testing and on TPU.'
            )
        } , )
    inference_time_csv_file: str = field(
        default=F'''inference_time_{round(time() )}.csv''' , metadata={'help': 'CSV filename used if saving time results to csv.'} , )
    inference_memory_csv_file: str = field(
        default=F'''inference_memory_{round(time() )}.csv''' , metadata={'help': 'CSV filename used if saving memory results to csv.'} , )
    train_time_csv_file: str = field(
        default=F'''train_time_{round(time() )}.csv''' , metadata={'help': 'CSV filename used if saving time results to csv for training.'} , )
    train_memory_csv_file: str = field(
        default=F'''train_memory_{round(time() )}.csv''' , metadata={'help': 'CSV filename used if saving memory results to csv for training.'} , )
    env_info_csv_file: str = field(
        default=F'''env_info_{round(time() )}.csv''' , metadata={'help': 'CSV filename used if saving environment information.'} , )
    log_filename: str = field(
        default=F'''log_{round(time() )}.csv''' , metadata={'help': 'Log filename used if print statements are saved in log.'} , )
    repeat: int = field(default=3 , metadata={'help': 'Times an experiment will be run.'} )
    only_pretrain_model: bool = field(
        default=False , metadata={
            'help': (
                'Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain'
                ' model weights.'
            )
        } , )
    def __post_init__( self ):
        '''simple docstring'''
        warnings.warn(
            F"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            """ are deprecated in general and it is advised to use external Benchmarking libraries """
            """ to benchmark Transformer models.""" , FutureWarning , )

    def to_json_string( self ):
        '''simple docstring'''
        return json.dumps(dataclasses.asdict(self ) , indent=2 )

    @property
    def model_names( self ):
        '''simple docstring'''
        if len(self.models ) <= 0:
            raise ValueError(
                """Please make sure you provide at least one model name / model identifier, *e.g.* `--models"""
                """ bert-base-cased` or `args.models = ['bert-base-cased'].""" )
        return self.models

    @property
    def do_multi_processing( self ):
        '''simple docstring'''
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("""Multiprocessing is currently not possible on TPU.""" )
            return False
        else:
            return True
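    # A minimal usage sketch (field names as restored above; `is_tpu` is expected
    # to be provided by a framework-specific subclass):
    #   args = SCREAMING_SNAKE_CASE(models=["bert-base-cased"], batch_sizes=[1], sequence_lengths=[8])
    #   args.model_names       # ["bert-base-cased"]
    #   args.to_json_string()  # JSON dump of every benchmark setting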
| 225 | 0 |
'''simple docstring'''
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash :
    def __init__( self , data ):
        """simple docstring"""
        self.data = data
        self.h = [0X67_45_23_01, 0XEF_CD_AB_89, 0X98_BA_DC_FE, 0X10_32_54_76, 0XC3_D2_E1_F0]

    @staticmethod
    def rotate( n , b ):
        """simple docstring"""
        return ((n << b) | (n >> (32 - b))) & 0XFF_FF_FF_FF

    def padding( self ):
        """simple docstring"""
        padding = B'''\x80''' + B'''\x00''' * (63 - (len(self.data ) + 8) % 64)
        padded_data = self.data + padding + struct.pack('''>Q''' , 8 * len(self.data ) )
        return padded_data

    def split_blocks( self ):
        """simple docstring"""
        return [
            self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
        ]

    def expand_block( self , block ):
        """simple docstring"""
        w = list(struct.unpack('''>16L''' , block ) ) + [0] * 64
        for i in range(16 , 80 ):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
        return w

    def final_hash( self ):
        """simple docstring"""
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block )
            a , b , c , d , e = self.h
            for i in range(0 , 80 ):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0X5A_82_79_99
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0X6E_D9_EB_A1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0X8F_1B_BC_DC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0XCA_62_C1_D6
                a , b , c , d , e = (
                    self.rotate(a , 5 ) + f + e + k + expanded_block[i] & 0XFF_FF_FF_FF,
                    a,
                    self.rotate(b , 30 ),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0XFF_FF_FF_FF,
                self.h[1] + b & 0XFF_FF_FF_FF,
                self.h[2] + c & 0XFF_FF_FF_FF,
                self.h[3] + d & 0XFF_FF_FF_FF,
                self.h[4] + e & 0XFF_FF_FF_FF,
            )
        return ("{:08x}" * 5).format(*self.h )
def test_sha1_hash() -> None:
    msg = B'''Test String'''
    assert SHA1Hash(msg ).final_hash() == hashlib.sha1(msg ).hexdigest() # noqa: S324


def main() -> None:
    parser = argparse.ArgumentParser(description='''Process some strings or files''' )
    parser.add_argument(
        '''--string''' , dest='''input_string''' , default='''Hello World!! Welcome to Cryptography''' , help='''Hash the string''' , )
    parser.add_argument('''--file''' , dest='''input_file''' , help='''Hash contents of a file''' )
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , '''rb''' ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string , '''utf-8''' )
    print(SHA1Hash(hash_input ).final_hash() )
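# Known-answer check (standard SHA-1 test vector, verifiable against hashlib):
#   SHA1Hash(b"abc").final_hash() == "a9993e364706816aba3e25717850c26c9cd0d89d"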
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
| 700 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _SCREAMING_SNAKE_CASE (ProcessorMixin ):
    feature_extractor_class = 'ClapFeatureExtractor'
    tokenizer_class = ('RobertaTokenizer', 'RobertaTokenizerFast')

    def __init__( self , feature_extractor , tokenizer ):
        """simple docstring"""
        super().__init__(feature_extractor , tokenizer )

    def __call__( self , text=None , audios=None , return_tensors=None , **kwargs ):
        """simple docstring"""
        sampling_rate = kwargs.pop('''sampling_rate''' , None )
        if text is None and audios is None:
            raise ValueError('''You have to specify either text or audios. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if audios is not None:
            audio_features = self.feature_extractor(
                audios , sampling_rate=sampling_rate , return_tensors=return_tensors , **kwargs )
        if text is not None and audios is not None:
            encoding['''input_features'''] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features ) , tensor_type=return_tensors )

    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
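# A usage sketch (the checkpoint name is an example; any CLAP checkpoint with a
# Roberta tokenizer and Clap feature extractor would do):
#   processor = _SCREAMING_SNAKE_CASE.from_pretrained("laion/clap-htsat-unfused")
#   inputs = processor(text=["a dog barking"], audios=audio_array, sampling_rate=48_000, return_tensors="pt")
#   # -> tokenizer fields plus "input_features" from the feature extractor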
| 574 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class __a ( BaseImageProcessor ):
    model_input_names = ['pixel_values']

    def __init__( self , do_resize : bool = True , size : Optional[Dict[str, int]] = None , resample : PILImageResampling = PILImageResampling.BILINEAR , do_center_crop : bool = True , crop_size : Dict[str, int] = None , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , do_normalize : bool = True , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        size = size if size is not None else {"""shortest_edge""": 256}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
        crop_size = get_size_dict(crop_size )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize( self , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PILImageResampling.BICUBIC , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        '''simple docstring'''
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
        output_size = get_resize_output_image_size(image , size=size["""shortest_edge"""] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )

    def center_crop( self , image : np.ndarray , size : Dict[str, int] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        '''simple docstring'''
        size = get_size_dict(size )
        return center_crop(image , size=(size["""height"""], size["""width"""]) , data_format=data_format , **kwargs )

    def rescale( self , image : np.ndarray , scale : float , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs ):
        '''simple docstring'''
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def normalize( self , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        '''simple docstring'''
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )

    def preprocess( self , images : ImageInput , do_resize : Optional[bool] = None , size : Dict[str, int] = None , resample : PILImageResampling = None , do_center_crop : bool = None , crop_size : Dict[str, int] = None , do_rescale : Optional[bool] = None , rescale_factor : Optional[float] = None , do_normalize : Optional[bool] = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : Union[str, ChannelDimension] = ChannelDimension.FIRST , **kwargs , ):
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""" )
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
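# A usage sketch (assuming a PIL image `img`; the defaults resize the short edge
# to 256, center-crop to 224x224 and normalize with ImageNet statistics):
#   image_processor = __a()
#   pixel_values = image_processor(img, return_tensors="pt")["pixel_values"]
#   pixel_values.shape  # torch.Size([1, 3, 224, 224])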
| 109 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    """configuration_maskformer""": ["""MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MaskFormerConfig"""],
    """configuration_maskformer_swin""": ["""MaskFormerSwinConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""feature_extraction_maskformer"""] = ["""MaskFormerFeatureExtractor"""]
    _import_structure["""image_processing_maskformer"""] = ["""MaskFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_maskformer"""] = [
"""MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MaskFormerForInstanceSegmentation""",
"""MaskFormerModel""",
"""MaskFormerPreTrainedModel""",
]
    _import_structure["""modeling_maskformer_swin"""] = [
"""MaskFormerSwinBackbone""",
"""MaskFormerSwinModel""",
"""MaskFormerSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
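    # Context note: the `sys.modules[__name__] = _LazyModule(...)` assignment above
    # swaps this package's module object for a lazy proxy, so a submodule is only
    # imported the first time one of its attributes (e.g. `MaskFormerModel`) is used.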
| 163 | 0 |
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess( image ):
    '''simple docstring'''
    w , h = image.size
    w , h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
    image = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] )
    image = np.array(image ).astype(np.float32 ) / 255.0
    image = image[None].transpose(0 , 3 , 1 , 2 )
    image = torch.from_numpy(image )
    return 2.0 * image - 1.0
class a__ ( DiffusionPipeline ):
    def __init__( self, vqvae, unet, scheduler, ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler )

    @torch.no_grad()
    def __call__( self, image = None, batch_size = 1, num_inference_steps = 100, eta = 0.0, generator = None, output_type = "pil", return_dict = True, ):
        '''simple docstring'''
        if isinstance(image, PIL.Image.Image ):
            batch_size = 1
        elif isinstance(image, torch.Tensor ):
            batch_size = image.shape[0]
        else:
            raise ValueError(F'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image )}''' )
        if isinstance(image, PIL.Image.Image ):
            image = preprocess(image )
        height , width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters() ).dtype
        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype )
        image = image.to(device=self.device, dtype=latents_dtype )
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device )
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(timesteps_tensor ):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1 )
            latents_input = self.scheduler.scale_model_input(latents_input, t )
            # predict the noise residual
            noise_pred = self.unet(latents_input, t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs ).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents ).sample
        image = torch.clamp(image, -1.0, 1.0 )
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
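
# A usage sketch (the checkpoint name is an example of this pipeline type):
#   pipe = a__.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#   upscaled = pipe(image=low_res_pil_image, num_inference_steps=100, eta=1.0).images[0]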
| 668 | """simple docstring"""
fast27_timesteps = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
smart27_timesteps = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
smart50_timesteps = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
smart100_timesteps = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
smart185_timesteps = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
super27_timesteps = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
super40_timesteps = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
super100_timesteps = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
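
# Context note (an inference from the list lengths and leading values): these are
# hand-tuned denoising schedules (a fast 27-step list, "smart" 27/50/100/185-step
# lists, and "super" 27/40/100-step lists) of the kind a diffusion pipeline can
# consume as custom timesteps, e.g. pipe(prompt, timesteps=fast27_timesteps).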
| 668 | 1 |
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory( _ ):
    return EnvironmentCommand()
class EnvironmentCommand ( BaseDiffusersCLICommand ):
"""simple docstring"""
    @staticmethod
    def register_subcommand( parser :ArgumentParser ):
        '''simple docstring'''
        download_parser = parser.add_parser("env" )
        download_parser.set_defaults(func=info_command_factory )
    def run( self ):
        '''simple docstring'''
        hub_version = huggingface_hub.__version__
        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__
        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__
        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__
        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }
        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
        print(self.format_dict(info ) )
        return info
    @staticmethod
    def format_dict( d :Dict ):
        '''simple docstring'''
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()] ) + "\n"
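
# A wiring sketch (how a diffusers-style CLI registers this command; names other
# than EnvironmentCommand and register_subcommand are assumptions):
#   parser = ArgumentParser("diffusers-cli")
#   commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")
#   EnvironmentCommand.register_subcommand(commands_parser)
#   args = parser.parse_args(["env"])
#   args.func(args).run()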
| 454 |
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __magic_name__ ( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {'''latents'''}
    def get_dummy_components( self ):
        '''simple docstring'''
        return self._get_dummy_components()

    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        mask_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
    def test_xformers_attention_forwardGenerator_pass( self ):
        '''simple docstring'''
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )

    def test_save_load_optional_components( self ):
        '''simple docstring'''
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
    def test_save_load_float16( self ):
        '''simple docstring'''
        super().test_save_load_float16(expected_max_diff=1e-1 )

    def test_attention_slicing_forward_pass( self ):
        '''simple docstring'''
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )

    def test_save_load_local( self ):
        '''simple docstring'''
        self._test_save_load_local()

    def test_inference_batch_single_identical( self ):
        '''simple docstring'''
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2 , )
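    # Note: the heavy lifting happens in the mixin helpers called above; each
    # test_* wrapper only pins a numeric tolerance (e.g. expected_max_diff=1e-2
    # bounds the allowed drift between the compared pipeline outputs).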
| 454 | 1 |
"""simple docstring"""
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = '''src/diffusers'''
# Matches is_xxx_available()
_re_backend = re.compile(r'''is\_([a-z_]*)_available\(\)''')
# Matches from xxx import bla
_re_single_line_import = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
DUMMY_CONSTANT = '''
{0} = None
'''
DUMMY_CLASS = '''
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
'''
DUMMY_FUNCTION = '''
def {0}(*args, **kwargs):
requires_backends({0}, {1})
'''
def find_backend( line ):
    """simple docstring"""
    backends = _re_backend.findall(line )
    if len(backends ) == 0:
        return None
    return "_and_".join(backends )
def read_init():
    """simple docstring"""
    with open(os.path.join(PATH_TO_DIFFUSERS , """__init__.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        lines = f.readlines()
    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines ):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index] )
        if backend is not None:
            while not lines[line_index].startswith("""else:""" ):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines ) and len(lines[line_index] ) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
                elif line.startswith(""" """ * 8 ):
                    objects.append(line[8:-2] )
                line_index += 1
            if len(objects ) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1
    return backend_specific_objects
def create_dummy_object( name , backend_name ):
    """simple docstring"""
    if name.isupper():
        return DUMMY_CONSTANT.format(name )
    elif name.islower():
        return DUMMY_FUNCTION.format(name , backend_name )
    else:
        return DUMMY_CLASS.format(name , backend_name )
def create_dummy_files( backend_specific_objects=None ):
    """simple docstring"""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}
    for backend, objects in backend_specific_objects.items():
        backend_name = """[""" + """, """.join(f'"{b}"' for b in backend.split("""_and_""" ) ) + """]"""
        dummy_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.\n"""
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o , backend_name ) for o in objects] )
        dummy_files[backend] = dummy_file
    return dummy_files
def check_dummies( overwrite=False ):
    """simple docstring"""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"""torch""": """pt"""}
    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS , """utils""" )
    dummy_file_paths = {
        backend: os.path.join(path , f'dummy_{short_names.get(backend , backend )}_objects.py' )
        for backend in dummy_files.keys()
    }
    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path ):
            with open(file_path , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = """"""
    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f'Updating diffusers.utils.dummy_{short_names.get(backend , backend )}_objects.py as the main '
                    """__init__ has new objects.""" )
                with open(dummy_file_paths[backend] , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
                    f.write(dummy_files[backend] )
            else:
                raise ValueError(
                    """The main __init__ has objects that are not present in """
                    f'diffusers.utils.dummy_{short_names.get(backend , backend )}_objects.py. Run `make fix-copies` '
                    """to fix this.""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 48 |
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile('''[^A-Za-z_0-9]''')
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash( tokens ):
    """simple docstring"""
    if len(tokens ) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM )
    for token in set(tokens ):
        min_hash.update(token.encode() )
    return min_hash
def get_tokens( code ):
    """simple docstring"""
    return {t for t in NON_ALPHA.split(code ) if len(t.strip() ) > 0}
class DuplicationIndex :
    """simple docstring"""
    def __init__(self , *,
        duplication_jaccard_threshold = 0.85 , ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
        self._duplicate_clusters = defaultdict(set )

    def add(self , code_key , min_hash ):
        close_duplicates = self._index.query(min_hash )
        if code_key in self._index.keys:
            print(f'Duplicate key {code_key}' )
            return
        self._index.insert(code_key , min_hash )
        if len(close_duplicates ) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key )
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key )

    def get_duplicate_clusters(self ):
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates )
            # reformat the cluster to be a list of dict
            cluster = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
            duplicate_clusters.append(cluster )
        return duplicate_clusters

    def save(self , filepath ):
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath , """w""" ) as f:
            json.dump(duplicate_clusters , f )
def _compute_min_hash( element ):
    """simple docstring"""
    index , data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] )
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter( dataset_iterator ):
    """simple docstring"""
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash , ThreadedIterator(dataset_iterator , max_queue_size=1_0_0_0_0 ) , chunksize=1_0_0 , ):
            if data is not None:
                yield data
def make_duplicate_clusters( dataset_iterator , jaccard_threshold ):
    """simple docstring"""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold )
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator ) ) , max_queue_size=1_0_0 ) ):
        di.add(filename , min_hash )
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity( code1 , code2 ):
    """simple docstring"""
    tokens1 = get_tokens(code1 )
    tokens2 = get_tokens(code2 )
    return len(tokens1 & tokens2 ) / len(tokens1 | tokens2 )
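
# Worked example: get_tokens splits on non-alphanumerics, so
#   jaccard_similarity("a b c", "b c d") == 2 / 4 == 0.5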
_shared_dataset = None
def _find_cluster_extremes_shared( cluster , jaccard_threshold ):
    """simple docstring"""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["""base_index"""]]["""content"""]
        for element2 in extremes:
            code2 = _shared_dataset[element2["""base_index"""]]["""content"""]
            if jaccard_similarity(code1 , code2 ) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1 )
    return extremes
def find_extremes( cluster_list , dataset , jaccard_threshold ):
    """simple docstring"""
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared , jaccard_threshold=jaccard_threshold )
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f , cluster_list , ) , total=len(cluster_list ) , ):
            extremes_list.append(extremes )
    return extremes_list
def deduplicate_dataset( dataset , jaccard_threshold = 0.85 ):
    """simple docstring"""
    duplicate_clusters = make_duplicate_clusters(dataset , jaccard_threshold )
    duplicate_indices = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters , dataset , jaccard_threshold )
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["""base_index"""]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys() )
    ds_filter = dataset.filter(lambda x , idx : idx not in remove_indices , with_indices=True )
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["""base_index"""] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["""base_index"""]]["""copies"""]
    print(f'Original dataset size: {len(dataset )}' )
    print(f'Number of duplicate clusters: {len(duplicate_clusters )}' )
    print(f'Files in duplicate cluster: {len(duplicate_indices )}' )
    print(f'Unique files in duplicate cluster: {len(extreme_dict )}' )
    print(f'Filtered dataset size: {len(ds_filter )}' )
    return ds_filter, duplicate_clusters
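
# A usage sketch (assuming a `datasets.Dataset` with "content", "repo_name" and
# "path" columns, as in the CodeParrot-style preprocessing this mirrors):
#   ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)
#   # ds_dedup keeps one "extreme" representative per near-duplicate cluster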
| 48 | 1 |
'''simple docstring'''
from __future__ import annotations
class XORCipher :
    def __init__( self , key = 0 ):
        '''simple docstring'''
        self.__key = key

    def encrypt( self , content , key ):
        '''simple docstring'''
        assert isinstance(key , int ) and isinstance(content , str )
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 2_5_5
        return [chr(ord(ch ) ^ key ) for ch in content]

    def decrypt( self , content , key ):
        '''simple docstring'''
        assert isinstance(key , int ) and isinstance(content , list )
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 2_5_5
        return [chr(ord(ch ) ^ key ) for ch in content]

    def encrypt_string( self , content , key = 0 ):
        '''simple docstring'''
        assert isinstance(key , int ) and isinstance(content , str )
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 2_5_5:
            key -= 2_5_5
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch ) ^ key )
        return ans

    def decrypt_string( self , content , key = 0 ):
        '''simple docstring'''
        assert isinstance(key , int ) and isinstance(content , str )
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 2_5_5:
            key -= 2_5_5
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch ) ^ key )
        return ans

    def encrypt_file( self , file , key = 0 ):
        '''simple docstring'''
        assert isinstance(file , str ) and isinstance(key , int )
        try:
            with open(file ) as fin, open("encrypt.out" , "w+" ) as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line , key ) )
        except OSError:
            return False
        return True

    def decrypt_file( self , file , key ):
        '''simple docstring'''
        assert isinstance(file , str ) and isinstance(key , int )
        try:
            with open(file ) as fin, open("decrypt.out" , "w+" ) as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line , key ) )
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 489 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250_004
RO_CODE = 250_020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_full_tokenizer( self ):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(tokens , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    def test_save_pretrained( self ):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        self.tokenizers_list[0] = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tmpdirname_2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname_2 )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname_2 )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname_2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname_2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname_2 )
                # Save tokenizer rust, legacy_format=True
                tmpdirname_2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname_2 , legacy_format=True )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname_2 )
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname_2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname_2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname_2 )
                # Save tokenizer rust, legacy_format=False
                tmpdirname_2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname_2 , legacy_format=False )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname_2 )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname_2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname_2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname_2 )
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest ( unittest.TestCase ):
    checkpoint_name = '''facebook/mbart-large-en-ro'''
    src_text = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
    tgt_text = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
    expected_src_tokens = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE]
@classmethod
    def setUpClass( cls ):
        cls.tokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
        cls.pad_token_id = 1
        return cls
def UpperCamelCase ( self ) -> List[Any]:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 25_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 25_00_04 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 25_00_20 )
def UpperCamelCase ( self ) -> Optional[Any]:
snake_case = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , A__ )
def UpperCamelCase ( self ) -> Union[str, Any]:
self.assertIn(A__ , self.tokenizer.all_special_ids )
snake_case = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
snake_case = self.tokenizer.decode(A__ , skip_special_tokens=A__ )
snake_case = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=A__ )
self.assertEqual(A__ , A__ )
self.assertNotIn(self.tokenizer.eos_token , A__ )
def UpperCamelCase ( self ) -> Tuple:
snake_case = ['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0] , A__ )
snake_case = 10
snake_case = self.tokenizer(A__ , max_length=A__ , truncation=A__ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , A__ )
self.assertEqual(len(A__ ) , A__ )
def UpperCamelCase ( self ) -> Union[str, Any]:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [25_00_26, 25_00_01] )
def UpperCamelCase ( self ) -> Dict:
snake_case = tempfile.mkdtemp()
snake_case = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(A__ )
snake_case = MBartTokenizer.from_pretrained(A__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , A__ )
@require_torch
def UpperCamelCase ( self ) -> Dict:
snake_case = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=A__ , return_tensors='''pt''' )
snake_case = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def UpperCamelCase ( self ) -> List[Any]:
snake_case = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=A__ , truncation=A__ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
snake_case = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(A__ , A__ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
snake_case = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , A__ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def UpperCamelCase ( self ) -> Dict:
snake_case = self.tokenizer(self.src_text , padding=A__ , truncation=A__ , max_length=3 , return_tensors='''pt''' )
snake_case = self.tokenizer(
text_target=self.tgt_text , padding=A__ , truncation=A__ , max_length=10 , return_tensors='''pt''' )
snake_case = targets['''input_ids''']
snake_case = shift_tokens_right(A__ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def UpperCamelCase ( self ) -> Union[str, Any]:
snake_case = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(A__ ) , {
# A, test, EOS, en_XX
'''input_ids''': [[62, 30_34, 2, 25_00_04]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 25_00_01,
} , )
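# A minimal standalone sketch (not part of the test suite) of the decoder-input shift the
# tests above rely on: for MBart, labels end with [..., eos, lang_code], and
# shift_tokens_right moves that final language code to position 0 so decoding starts from
# the target-language token. The token ids below are illustrative, not real vocabulary ids.
def _sketch_shift_tokens_right():
    import torch

    def shift(input_ids, pad_token_id):
        prev_output_tokens = input_ids.clone()
        # index of the last non-pad token in each row (the language code for MBart labels)
        index_of_eos = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
        prev_output_tokens[:, 1:] = input_ids[:, :-1].clone()
        prev_output_tokens[:, 0] = input_ids.gather(1, index_of_eos).squeeze()
        return prev_output_tokens

    labels = torch.tensor([[17, 23, 2, 250020]])  # tokens..., eos, ro_RO
    assert shift(labels, pad_token_id=1)[0].tolist() == [250020, 17, 23, 2]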
| 342 | 0 |
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = """
import os
"""
IMPORT_IN_FUNCTION = """
def foo():
import os
return False
"""
DEEPLY_NESTED_IMPORT = """
def foo():
def bar():
if True:
import os
return False
return bar()
"""
TOP_LEVEL_TRY_IMPORT = """
import os
try:
import bar
except ImportError:
raise ValueError()
"""
TRY_IMPORT_IN_FUNCTION = """
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
"""
MULTIPLE_EXCEPTS_IMPORT = """
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
"""
EXCEPT_AS_IMPORT = """
import os
try:
import bar
except ImportError as e:
raise ValueError()
"""
GENERIC_EXCEPT_IMPORT = """
import os
try:
import bar
except:
raise ValueError()
"""
MULTILINE_TRY_IMPORT = """
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
"""
MULTILINE_BOTH_IMPORT = """
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
"""
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case" , _A )
def __A ( _A , _A ):
"""simple docstring"""
__a = os.path.join(_A , "test_file.py" )
with open(_A , "w" ) as _tmp_file:
_tmp_file.write(_A )
__a = get_imports(_A )
assert parsed_imports == ["os"]
| 525 |
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])

        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )

        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 525 | 1 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        hidden_sizes=[32, 64, 128],
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2"],
        out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])

        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@unittest.skip(reason="FocalNet does not use inputs_embeds" )
def __snake_case ( self : Optional[int] ) -> int:
pass
@unittest.skip(reason="FocalNet does not use feedforward chunking" )
def __snake_case ( self : List[Any] ) -> List[str]:
pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    @slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # TODO update organization
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)
@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
| 81 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig(PretrainedConfig):
    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}."
                )
            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual

        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)

        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()

        output["model_type"] = self.__class__.model_type
        return output
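# Minimal usage sketch (illustrative; mirrors the logic above): the hybrid variant
# auto-creates a BiT backbone config, and to_dict() serializes it back alongside the
# ViT-style fields.
#
# config = DPTConfig(is_hybrid=True)
# assert config.to_dict()["backbone_config"]["layer_type"] == "bottleneck"
# assert config.to_dict()["model_type"] == "dpt"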
| 414 | 0 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to BridgeTowerImageProcessor,
        assuming do_resize is set to True with a scalar size and size_divisor.
        """
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
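    # Worked example of the sizing logic above (illustrative numbers): for a 400x600
    # image with shortest_edge=288, scale = 288/400 = 0.72, so (h, w) -> (288, 432);
    # the cap int(1333 / 800 * 288) = 479 is not exceeded, and flooring each side to a
    # multiple of size_divisor=32 yields (288, 416).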
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
| 705 |
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup
logger = logging.get_logger(__name__)
class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        valid_strings = False

        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))

        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
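# Standalone sketch of the xpath construction above using plain bs4 (illustrative
# markup; it mirrors xpath_soup/construct_xpath for a single node).
def _demo_xpath_for_second_paragraph():
    soup = BeautifulSoup("<html><body><p>first</p><p>second</p></body></html>", "html.parser")
    node = soup.find_all("p")[1]
    tags, subs = [], []
    child = node if node.name else node.parent
    for parent in child.parents:
        siblings = parent.find_all(child.name, recursive=False)
        tags.append(child.name)
        subs.append(0 if len(siblings) == 1 else next(i for i, s in enumerate(siblings, 1) if s is child))
        child = parent
    tags.reverse()
    subs.reverse()
    return "".join(f"/{t}[{s}]" if s else f"/{t}" for t, s in zip(tags, subs))  # -> "/html/body/p[2]"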
| 259 | 0 |
def method_1(boundary, steps):
    # "extended trapezoidal rule": int(f) ~ h/2 * (f(a) + 2*f(x_1) + ... + f(b))
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
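# Sanity-check sketch: with f(x) = x**2 on [0, 1] the exact integral is 1/3. The point
# generator above can drop a point near the right endpoint, so the tolerance is loose.
assert abs(method_1([0.0, 1.0], 100.0) - 1.0 / 3.0) < 2e-2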
| 60 |
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence most TensorFlow logging
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
| 124 | 0 |
import math


def sieve(n: int) -> list[int]:
    """Segmented sieve: return all primes <= n."""
    prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime


print(sieve(10**6))
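# Cross-check sketch: for a small bound the segmented sieve should reproduce the
# familiar list of primes.
assert sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]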
| 410 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-openqa''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-openqa''': (
'''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-reader''': (
'''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-openqa''': (
'''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-reader''': (
'''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/realm-cc-news-pretrained-embedder''': 512,
'''google/realm-cc-news-pretrained-encoder''': 512,
'''google/realm-cc-news-pretrained-scorer''': 512,
'''google/realm-cc-news-pretrained-openqa''': 512,
'''google/realm-orqa-nq-openqa''': 512,
'''google/realm-orqa-nq-reader''': 512,
'''google/realm-orqa-wq-openqa''': 512,
'''google/realm-orqa-wq-reader''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer
def __init__( self : str , _SCREAMING_SNAKE_CASE : Union[str, Any]=None , _SCREAMING_SNAKE_CASE : Tuple=None , _SCREAMING_SNAKE_CASE : Optional[Any]=True , _SCREAMING_SNAKE_CASE : Tuple="[UNK]" , _SCREAMING_SNAKE_CASE : Optional[int]="[SEP]" , _SCREAMING_SNAKE_CASE : Dict="[PAD]" , _SCREAMING_SNAKE_CASE : Any="[CLS]" , _SCREAMING_SNAKE_CASE : int="[MASK]" , _SCREAMING_SNAKE_CASE : int=True , _SCREAMING_SNAKE_CASE : List[str]=None , **_SCREAMING_SNAKE_CASE : int , ):
"""simple docstring"""
super().__init__(
_SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , tokenize_chinese_chars=_SCREAMING_SNAKE_CASE , strip_accents=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
UpperCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _SCREAMING_SNAKE_CASE ) != do_lower_case
or normalizer_state.get('strip_accents' , _SCREAMING_SNAKE_CASE ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _SCREAMING_SNAKE_CASE ) != tokenize_chinese_chars
):
UpperCamelCase = getattr(_SCREAMING_SNAKE_CASE , normalizer_state.pop('type' ) )
UpperCamelCase = do_lower_case
UpperCamelCase = strip_accents
UpperCamelCase = tokenize_chinese_chars
UpperCamelCase = normalizer_class(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = do_lower_case
    def batch_encode_candidates(self, text, **kwargs):
        # always pad candidates to max_length so they can be stacked per example
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}

        return BatchEncoding(output_data, tensor_type=return_tensors)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
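# Usage sketch for batch_encode_candidates (requires downloading a checkpoint, hence
# commented out): each example carries several candidate texts, and every candidate is
# padded to max_length so the output stacks into a [batch, num_candidates, seq_len] tensor.
#
# tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
# batch = tokenizer.batch_encode_candidates([["candidate 1", "candidate 2"]], max_length=10, return_tensors="pt")
# batch.input_ids.shape  # -> torch.Size([1, 2, 10])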
| 410 | 1 |
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = '''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results[\'pearsonr\'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
[\'p-value\', \'pearsonr\']
>>> print(round(results[\'pearsonr\'], 2))
-0.74
>>> print(round(results[\'p-value\'], 2))
0.15
'''
_CITATION = '''
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 465 |
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
"""en""": """Machine learning is great, isn't it?""",
"""ru""": """Машинное обучение - это здорово, не так ли?""",
"""de""": """Maschinelles Lernen ist großartig, nicht wahr?""",
}
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
"""wmt16-en-de-dist-12-1""": [28.3, 27.52],
"""wmt16-en-de-dist-6-1""": [27.4, 27.11],
"""wmt16-en-de-12-1""": [26.9, 25.75],
}
    pair = f"{src_lang}-{tgt_lang}"

    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"allenai/{model_name}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, because the researchers don't use `sacrebleu` and instead measure the score on tokenized outputs. The `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
"""
model_card_dir.mkdir(parents=_lowerCAmelCase , exist_ok=_lowerCAmelCase )
__lowerCAmelCase = os.path.join(_lowerCAmelCase , """README.md""" )
print(f"""Generating {path}""" )
with open(_lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(_lowerCAmelCase )
# make sure we are under the root of the project
SCREAMING_SNAKE_CASE_ = Path(__file__).resolve().parent.parent.parent
SCREAMING_SNAKE_CASE_ = repo_dir / '''model_cards'''
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
SCREAMING_SNAKE_CASE_ = model_cards_dir / '''allenai''' / model_name
write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name)
| 465 | 1 |
"""simple docstring"""
from cva import destroyAllWindows, imread, imshow, waitKey
def _A ( _a : Optional[Any] ):
"""simple docstring"""
A , A = img.shape[0], img.shape[1]
# converting each pixel's color to its negative
for i in range(_a ):
for j in range(_a ):
A = [2_5_5, 2_5_5, 2_5_5] - img[i][j]
return img
if __name__ == "__main__":
# read original image
UpperCAmelCase =imread("image_data/lena.jpg", 1)
# convert to its negative
UpperCAmelCase =convert_to_negative(img)
# show result image
imshow("negative of original image", img)
waitKey(0)
destroyAllWindows()
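# Hedged alternative sketch (illustration only): the nested per-pixel loops above can
# be replaced by a single vectorized NumPy expression; shown on a synthetic array so
# it runs without the image file assumed above.
import numpy as np

_demo = np.random.randint(0, 256, size=(4, 4, 3), dtype=np.uint8)
_negative = 255 - _demo  # broadcasts the inversion over every channel of every pixel
assert (_negative.astype(int) + _demo.astype(int) == 255).all()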
| 255 |
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
UpperCAmelCase =get_logger(__name__)
UpperCAmelCase =R"\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n"
class lowerCamelCase__ :
'''simple docstring'''
@add_start_docstrings(lowerCamelCase_ )
def __call__( self ,lowerCamelCase_ ,lowerCamelCase_ ) -> jnp.ndarray:
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
class lowerCamelCase__ :
'''simple docstring'''
@add_start_docstrings(lowerCamelCase_ )
def __call__( self ,lowerCamelCase_ ,lowerCamelCase_ ) -> jnp.ndarray:
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@add_start_docstrings(lowerCamelCase_ )
def __call__( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,**lowerCamelCase_ ) -> jnp.ndarray:
for processor in self:
A = inspect.signature(processor.__call__ ).parameters
if len(lowerCamelCase_ ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
f'Make sure that all the required parameters: {list(function_args.keys() )} for '
f'{processor.__class__} are passed to the logits processor.' )
A = processor(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,**lowerCamelCase_ )
else:
A = processor(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
return scores
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self ,lowerCamelCase_ ) -> List[str]:
if not isinstance(lowerCamelCase_ ,lowerCamelCase_ ) or not (temperature > 0):
raise ValueError(f'`temperature` has to be a strictly positive float, but is {temperature}' )
A = temperature
def __call__( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) -> jnp.ndarray:
A = scores / self.temperature
return scores
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self ,lowerCamelCase_ ,lowerCamelCase_ = -float("""Inf""" ) ,lowerCamelCase_ = 1 ) -> Dict:
if not isinstance(lowerCamelCase_ ,lowerCamelCase_ ) or (top_p < 0 or top_p > 1.0):
raise ValueError(f'`top_p` has to be a float > 0 and < 1, but is {top_p}' )
if not isinstance(lowerCamelCase_ ,lowerCamelCase_ ) or (min_tokens_to_keep < 1):
raise ValueError(f'`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}' )
A = top_p
A = filter_value
A = min_tokens_to_keep
def __call__( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) -> jnp.ndarray:
A , A = lax.top_k(lowerCamelCase_ ,scores.shape[-1] )
A = jnp.full_like(lowerCamelCase_ ,self.filter_value )
A = jax.nn.softmax(lowerCamelCase_ ,axis=-1 ).cumsum(axis=-1 )
A = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
A = jnp.roll(lowerCamelCase_ ,1 )
score_mask |= score_mask.at[:, 0].set(lowerCamelCase_ )
# min tokens to keep
A = score_mask.at[:, : self.min_tokens_to_keep].set(lowerCamelCase_ )
A = jnp.where(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
A = jax.lax.sort_key_val(lowerCamelCase_ ,lowerCamelCase_ )[-1]
return next_scores
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self ,lowerCamelCase_ ,lowerCamelCase_ = -float("""Inf""" ) ,lowerCamelCase_ = 1 ) -> List[Any]:
if not isinstance(lowerCamelCase_ ,lowerCamelCase_ ) or top_k <= 0:
raise ValueError(f'`top_k` has to be a strictly positive integer, but is {top_k}' )
A = max(lowerCamelCase_ ,lowerCamelCase_ )
A = filter_value
def __call__( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) -> jnp.ndarray:
A , A = scores.shape
A = jnp.full(batch_size * vocab_size ,self.filter_value )
A = min(self.top_k ,scores.shape[-1] ) # Safety check
A , A = lax.top_k(lowerCamelCase_ ,lowerCamelCase_ )
A = jnp.broadcast_to((jnp.arange(lowerCamelCase_ ) * vocab_size)[:, None] ,(batch_size, topk) ).flatten()
A = topk_scores.flatten()
A = topk_indices.flatten() + shift
A = next_scores_flat.at[topk_indices_flat].set(lowerCamelCase_ )
A = next_scores_flat.reshape(lowerCamelCase_ ,lowerCamelCase_ )
return next_scores
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self ,lowerCamelCase_ ) -> List[Any]:
A = bos_token_id
def __call__( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) -> jnp.ndarray:
A = jnp.full(scores.shape ,-float("""inf""" ) )
A = 1 - jnp.bool_(cur_len - 1 )
A = jnp.where(lowerCamelCase_ ,new_scores.at[:, self.bos_token_id].set(0 ) ,lowerCamelCase_ )
return scores
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self ,lowerCamelCase_ ,lowerCamelCase_ ) -> Union[str, Any]:
A = max_length
A = eos_token_id
def __call__( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) -> jnp.ndarray:
A = jnp.full(scores.shape ,-float("""inf""" ) )
A = 1 - jnp.bool_(cur_len - self.max_length + 1 )
A = jnp.where(lowerCamelCase_ ,new_scores.at[:, self.eos_token_id].set(0 ) ,lowerCamelCase_ )
return scores
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self ,lowerCamelCase_ ,lowerCamelCase_ ) -> List[str]:
if not isinstance(lowerCamelCase_ ,lowerCamelCase_ ) or min_length < 0:
raise ValueError(f'`min_length` has to be a positive integer, but is {min_length}' )
if not isinstance(lowerCamelCase_ ,lowerCamelCase_ ) or eos_token_id < 0:
raise ValueError(f'`eos_token_id` has to be a positive integer, but is {eos_token_id}' )
A = min_length
A = eos_token_id
def __call__( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) -> jnp.ndarray:
# create boolean flag to decide if min length penalty should be applied
A = 1 - jnp.clip(cur_len - self.min_length ,0 ,1 )
A = jnp.where(lowerCamelCase_ ,scores.at[:, self.eos_token_id].set(-float("""inf""" ) ) ,lowerCamelCase_ )
return scores
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self ,lowerCamelCase_ ,lowerCamelCase_ ) -> List[str]:
A = list(lowerCamelCase_ )
A = begin_index
def __call__( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) -> Dict:
A = 1 - jnp.bool_(cur_len - self.begin_index )
A = jnp.where(lowerCamelCase_ ,scores.at[:, self.begin_suppress_tokens].set(-float("""inf""" ) ) ,lowerCamelCase_ )
return scores
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self ,lowerCamelCase_ ) -> str:
A = list(lowerCamelCase_ )
def __call__( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) -> jnp.ndarray:
A = scores.at[..., self.suppress_tokens].set(-float("""inf""" ) )
return scores
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self ,lowerCamelCase_ ) -> Union[str, Any]:
A = dict(lowerCamelCase_ )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
A = jnp.ones((max(force_token_map.keys() ) + 1) ,dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
A = force_token_array.at[index].set(lowerCamelCase_ )
A = jnp.intaa(lowerCamelCase_ )
def __call__( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) -> jnp.ndarray:
def _force_token(lowerCamelCase_ ):
A = scores.shape[0]
A = self.force_token_array[generation_idx]
A = jnp.ones_like(lowerCamelCase_ ,dtype=scores.dtype ) * -float("""inf""" )
A = jnp.zeros((batch_size, 1) ,dtype=scores.dtype )
A = lax.dynamic_update_slice(lowerCamelCase_ ,lowerCamelCase_ ,(0, current_token) )
return new_scores
A = lax.cond(
cur_len >= self.force_token_array.shape[0] ,lambda: scores ,lambda: lax.cond(
self.force_token_array[cur_len] >= 0 ,lambda: _force_token(lowerCamelCase_ ) ,lambda: scores ,) ,)
return scores
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) -> Optional[int]:
A = generate_config.eos_token_id
A = generate_config.no_timestamps_token_id
A = generate_config.no_timestamps_token_id + 1
A = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(lowerCamelCase_ ,"""max_initial_timestamp_index""" ):
A = generate_config.max_initial_timestamp_index
else:
A = model_config.vocab_size
if self.max_initial_timestamp_index is None:
A = model_config.vocab_size
def __call__( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) -> List[Any]:
# suppress <|notimestamps|> which is handled by without_timestamps
A = scores.at[:, self.no_timestamps_token_id].set(-float("""inf""" ) )
def handle_pairs(lowerCamelCase_ ,lowerCamelCase_ ):
A = jnp.where((cur_len - self.begin_index) >= 1 ,lowerCamelCase_ ,lowerCamelCase_ )
A = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin ,True and last_was_timestamp ,lowerCamelCase_ ,)
A = jnp.where((cur_len - self.begin_index) < 2 ,lowerCamelCase_ ,lowerCamelCase_ )
A = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin ,lowerCamelCase_ ,lowerCamelCase_ ,)
return jnp.where(
lowerCamelCase_ ,jnp.where(
penultimate_was_timestamp > 0 ,scores_k.at[self.timestamp_begin :].set(-float("""inf""" ) ) ,scores_k.at[: self.eos_token_id].set(-float("""inf""" ) ) ,) ,lowerCamelCase_ ,)
A = jax.vmap(lowerCamelCase_ )(lowerCamelCase_ ,lowerCamelCase_ )
A = jnp.where(cur_len == self.begin_index ,lowerCamelCase_ ,lowerCamelCase_ )
A = jnp.where(
self.max_initial_timestamp_index is not None ,True and apply_max_initial_timestamp ,lowerCamelCase_ ,)
A = self.timestamp_begin + self.max_initial_timestamp_index
A = jnp.where(
lowerCamelCase_ ,scores.at[:, last_allowed + 1 :].set(-float("""inf""" ) ) ,lowerCamelCase_ ,)
# if sum of probability over timestamps is above any other token, sample timestamp
A = jax.nn.log_softmax(lowerCamelCase_ ,axis=-1 )
def handle_cumulative_probs(lowerCamelCase_ ,lowerCamelCase_ ):
A = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] ,axis=-1 )
A = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob ,scores_k.at[: self.timestamp_begin].set(-float("""inf""" ) ) ,lowerCamelCase_ ,)
A = jax.vmap(lowerCamelCase_ )(lowerCamelCase_ ,lowerCamelCase_ )
return scores
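if __name__ == "__main__":
    # Hedged sketch (illustration only) of the top-p masking logic implemented above,
    # reproduced with raw jnp ops on toy logits rather than through the classes in
    # this file; all values below are assumptions chosen for the example.
    _scores = jnp.array([[1.0, 2.0, 3.0, 4.0]])
    _probs = jax.nn.softmax(_scores, axis=-1)               # ~[0.032, 0.087, 0.237, 0.644]
    _sorted = jnp.sort(_probs, axis=-1)[:, ::-1]            # descending order
    _cum = _sorted.cumsum(axis=-1)                          # ~[0.644, 0.881, 0.968, 1.000]
    _keep = _cum < 0.9                                      # [True, True, False, False]
    _keep = jnp.roll(_keep, 1, axis=-1).at[:, 0].set(True)  # also keep the token crossing 0.9
    print(_keep)  # expected: [[ True  True  True False]]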
| 255 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowerCAmelCase ( metaclass=lowerCamelCase_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = ["""torch""", """torchsde"""]
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[str]:
requires_backends(self , ['torch', 'torchsde'] )
@classmethod
def __A ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> int:
requires_backends(cls , ['torch', 'torchsde'] )
@classmethod
def __A ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> int:
requires_backends(cls , ['torch', 'torchsde'] )
| 247 |
"""simple docstring"""
from __future__ import annotations
__UpperCamelCase = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
__UpperCamelCase = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def lowercase (SCREAMING_SNAKE_CASE_ : list[float] ) -> list[float]:
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = len(SCREAMING_SNAKE_CASE_ )
for i in range(SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE = -1
for j in range(i + 1 , SCREAMING_SNAKE_CASE_ ):
if arr[i] < arr[j]:
SCREAMING_SNAKE_CASE = arr[j]
break
result.append(SCREAMING_SNAKE_CASE_ )
return result
def lowercase (SCREAMING_SNAKE_CASE_ : list[float] ) -> list[float]:
SCREAMING_SNAKE_CASE = []
for i, outer in enumerate(SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE = -1
for inner in arr[i + 1 :]:
if outer < inner:
SCREAMING_SNAKE_CASE = inner
break
result.append(SCREAMING_SNAKE_CASE_ )
return result
def lowercase (SCREAMING_SNAKE_CASE_ : list[float] ) -> list[float]:
SCREAMING_SNAKE_CASE = len(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = [-1] * arr_size
for index in reversed(range(SCREAMING_SNAKE_CASE_ ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
SCREAMING_SNAKE_CASE = stack[-1]
stack.append(arr[index] )
return result
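def _next_greater_demo(seq: list) -> list:
    # Hedged self-contained sketch (illustration only) of the same right-to-left
    # monotonic-stack idea as the function above, with an explicit worked check.
    stack: list = []
    out = [-1] * len(seq)
    for idx in reversed(range(len(seq))):
        while stack and stack[-1] <= seq[idx]:
            stack.pop()  # drop candidates that are not greater than the current value
        if stack:
            out[idx] = stack[-1]  # nearest greater element to the right
        stack.append(seq[idx])
    return out


assert _next_greater_demo([2, 7, 3, 5, 1]) == [7, -1, 5, -1, -1]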
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
__UpperCamelCase = (
'''from __main__ import arr, next_greatest_element_slow, '''
'''next_greatest_element_fast, next_greatest_element'''
)
print(
'''next_greatest_element_slow():''',
timeit('''next_greatest_element_slow(arr)''', setup=setup),
)
print(
'''next_greatest_element_fast():''',
timeit('''next_greatest_element_fast(arr)''', setup=setup),
)
print(
''' next_greatest_element():''',
timeit('''next_greatest_element(arr)''', setup=setup),
)
| 247 | 1 |
def __lowercase ( _SCREAMING_SNAKE_CASE = 60_08_51_47_51_43 ) -> int:
'''simple docstring'''
try:
SCREAMING_SNAKE_CASE = int(_SCREAMING_SNAKE_CASE )
except (TypeError, ValueError):
raise TypeError("""Parameter n must be int or castable to int.""" )
if n <= 0:
raise ValueError("""Parameter n must be greater than or equal to one.""" )
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
SCREAMING_SNAKE_CASE = i
        while n % i == 0:
            SCREAMING_SNAKE_CASE = n // i
        i += 1  # advance only after all copies of the current factor are divided out
return int(_SCREAMING_SNAKE_CASE )
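def _largest_prime_factor_demo(n: int) -> int:
    # Hedged self-contained check (illustration only) of the same trial-division
    # idea as the function above, verified on 13195 = 5 * 7 * 13 * 29.
    largest, i = 1, 2
    while i * i <= n:
        while n % i == 0:
            largest = i
            n //= i
        i += 1
    return n if n > 1 else largest


assert _largest_prime_factor_demo(13_195) == 29
assert _largest_prime_factor_demo(600_851_475_143) == 6_857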
if __name__ == "__main__":
print(F'''{solution() = }''')
| 702 |
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"""--original_config_file""",
default=None,
type=str,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--scheduler_type""",
default="""pndm""",
type=str,
help="""Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']""",
)
parser.add_argument(
"""--pipeline_type""",
default=None,
type=str,
help=(
"""The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"""
""". If `None` pipeline will be automatically inferred."""
),
)
parser.add_argument(
"""--image_size""",
default=None,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--prediction_type""",
default=None,
type=str,
help=(
"""The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"""
""" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
parser.add_argument(
"""--stable_unclip""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.""",
)
parser.add_argument(
"""--stable_unclip_prior""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.""",
)
parser.add_argument(
"""--clip_stats_path""",
type=str,
help="""Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.""",
required=False,
)
parser.add_argument(
"""--controlnet""", action="""store_true""", default=None, help="""Set flag if this is a controlnet checkpoint."""
)
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--vae_path""",
type=str,
default=None,
required=False,
help="""Set to a path, hub id to an already converted vae to not convert it again.""",
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
SCREAMING_SNAKE_CASE_ = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
pipe.to(torch_dtype=torch.floataa)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
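# Hedged invocation sketch (illustration only): the script name and every path below
# are hypothetical and depend on your checkout and checkpoint files.
#
#   python convert_original_stable_diffusion_to_diffusers.py \
#       --checkpoint_path ./v1-5-pruned-emaonly.ckpt \
#       --original_config_file ./v1-inference.yaml \
#       --scheduler_type pndm \
#       --extract_ema \
#       --dump_path ./stable-diffusion-v1-5-diffusers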
| 116 | 0 |
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
UpperCamelCase_ = True
from torch.cuda.amp import autocast
UpperCamelCase_ = logging.getLogger(__name__)
def _lowerCamelCase ( lowerCamelCase_: Union[str, Any]=None , lowerCamelCase_: Tuple=None ):
'''simple docstring'''
return field(default_factory=lambda: default , metadata=lowerCamelCase_ )
@dataclass
class _SCREAMING_SNAKE_CASE :
lowerCamelCase_ = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
lowerCamelCase_ = field(
default=snake_case, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
lowerCamelCase_ = field(
default=snake_case, metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} )
lowerCamelCase_ = field(
default=0.1, metadata={'help': 'The dropout ratio for the attention probabilities.'} )
lowerCamelCase_ = field(
default=0.1, metadata={'help': 'The dropout ratio for activations inside the fully connected layer.'} )
lowerCamelCase_ = field(
default=0.1, metadata={
            'help': 'The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.'
}, )
lowerCamelCase_ = field(
        default=0.1, metadata={'help': 'The dropout probability for all 1D convolutional layers in the feature extractor.'}, )
lowerCamelCase_ = field(
default=0.05, metadata={
'help': (
                'Probability of each feature vector along the time axis to be chosen as the start of the vector '
                'span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature '
                'vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``.'
)
}, )
lowerCamelCase_ = field(default=0.0, metadata={'help': 'The LayerDrop probability.'} )
@dataclass
class _SCREAMING_SNAKE_CASE :
lowerCamelCase_ = field(
default=snake_case, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
lowerCamelCase_ = field(
default='train+validation', metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
}, )
lowerCamelCase_ = field(
default=snake_case, metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
lowerCamelCase_ = field(
default=snake_case, metadata={'help': 'The number of processes to use for the preprocessing.'}, )
lowerCamelCase_ = field(
default=snake_case, metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
}, )
lowerCamelCase_ = field(
default=snake_case, metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of validation examples to this '
'value if set.'
)
}, )
lowerCamelCase_ = list_field(
default=[',', '?', '.', '!', '-', ';', ':', '""', '%', '\'', '"', '�'], metadata={'help': 'A list of characters to remove from the transcripts.'}, )
@dataclass
class _SCREAMING_SNAKE_CASE :
lowerCamelCase_ = 42
lowerCamelCase_ = True
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
def __call__( self : Dict , snake_case_ : List[Dict[str, Union[List[int], torch.Tensor]]] ):
"""simple docstring"""
A : Union[str, Any] = [{'''input_values''': feature['''input_values''']} for feature in features]
A : Dict = [{'''input_ids''': feature['''labels''']} for feature in features]
A : List[Any] = self.processor.pad(
snake_case_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
A : List[str] = self.processor.pad(
labels=snake_case_ , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors='''pt''' , )
# replace padding with -100 to ignore loss correctly
A : Tuple = labels_batch['''input_ids'''].masked_fill(labels_batch.attention_mask.ne(1 ) , -100 )
A : int = labels
return batch
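def _demo_ignore_index_masking() -> torch.Tensor:
    # Hedged sketch (illustration only) of the -100 masking used by the collator
    # above: label positions where attention_mask == 0 (padding) are set to -100,
    # the conventional ignore index, so they do not contribute to the loss.
    labels = torch.tensor([[5, 9, 2, 0, 0]])  # trailing 0s assumed to be pad ids
    attention_mask = torch.tensor([[1, 1, 1, 0, 0]])
    return labels.masked_fill(attention_mask.ne(1), -100)  # -> [[5, 9, 2, -100, -100]]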
class _SCREAMING_SNAKE_CASE ( snake_case ):
def _UpperCAmelCase ( self : Tuple , snake_case_ : nn.Module , snake_case_ : Dict[str, Union[torch.Tensor, Any]] ):
"""simple docstring"""
model.train()
A : List[str] = self._prepare_inputs(snake_case_ )
if self.use_amp:
with autocast():
A : Dict = self.compute_loss(snake_case_ , snake_case_ )
else:
A : str = self.compute_loss(snake_case_ , snake_case_ )
if self.args.n_gpu > 1:
if model.module.config.ctc_loss_reduction == "mean":
A : Tuple = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
A : List[Any] = loss.sum() / (inputs['''labels'''] >= 0).sum()
else:
raise ValueError(f"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" )
if self.args.gradient_accumulation_steps > 1:
A : str = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(snake_case_ ).backward()
elif self.use_apex:
with amp.scale_loss(snake_case_ , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(snake_case_ )
else:
loss.backward()
return loss.detach()
def _lowerCamelCase ( ):
'''simple docstring'''
A : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A , A , A : Tuple = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A , A , A : Union[str, Any] = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
A : Tuple = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
A : Union[str, Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , lowerCamelCase_ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
A : Any = datasets.load_dataset(
'''common_voice''' , data_args.dataset_config_name , split=data_args.train_split_name )
A : Any = datasets.load_dataset('''common_voice''' , data_args.dataset_config_name , split='''test''' )
# Create and save tokenizer
A : Tuple = f"""[{"".join(data_args.chars_to_ignore )}]"""
def remove_special_characters(lowerCamelCase_: Union[str, Any] ):
A : List[Any] = re.sub(lowerCamelCase_ , '''''' , batch['''sentence'''] ).lower() + ''' '''
return batch
A : Optional[int] = train_dataset.map(lowerCamelCase_ , remove_columns=['''sentence'''] )
A : Optional[Any] = eval_dataset.map(lowerCamelCase_ , remove_columns=['''sentence'''] )
def extract_all_chars(lowerCamelCase_: List[str] ):
A : Union[str, Any] = ''' '''.join(batch['''text'''] )
A : str = list(set(lowerCamelCase_ ) )
return {"vocab": [vocab], "all_text": [all_text]}
A : Tuple = train_dataset.map(
lowerCamelCase_ , batched=lowerCamelCase_ , batch_size=-1 , keep_in_memory=lowerCamelCase_ , remove_columns=train_dataset.column_names , )
A : str = train_dataset.map(
lowerCamelCase_ , batched=lowerCamelCase_ , batch_size=-1 , keep_in_memory=lowerCamelCase_ , remove_columns=eval_dataset.column_names , )
A : Tuple = list(set(vocab_train['''vocab'''][0] ) | set(vocab_test['''vocab'''][0] ) )
A : Any = {v: k for k, v in enumerate(lowerCamelCase_ )}
A : Optional[Any] = vocab_dict[''' ''']
del vocab_dict[" "]
A : Tuple = len(lowerCamelCase_ )
A : int = len(lowerCamelCase_ )
with open('''vocab.json''' , '''w''' ) as vocab_file:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A : Dict = WavaVecaCTCTokenizer(
'''vocab.json''' , unk_token='''[UNK]''' , pad_token='''[PAD]''' , word_delimiter_token='''|''' , )
A : int = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0.0 , do_normalize=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ )
A : Optional[Any] = WavaVecaProcessor(feature_extractor=lowerCamelCase_ , tokenizer=lowerCamelCase_ )
A : Dict = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction='''mean''' , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
A : Any = min(len(lowerCamelCase_ ) , data_args.max_train_samples )
A : Union[str, Any] = train_dataset.select(range(lowerCamelCase_ ) )
if data_args.max_val_samples is not None:
A : Any = eval_dataset.select(range(data_args.max_val_samples ) )
A : Optional[Any] = torchaudio.transforms.Resample(4_8000 , 1_6000 )
# Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(lowerCamelCase_: Any ):
A , A : int = torchaudio.load(batch['''path'''] )
A : Tuple = resampler(lowerCamelCase_ ).squeeze().numpy()
A : Tuple = 1_6000
A : Any = batch['''text''']
return batch
A : Any = train_dataset.map(
lowerCamelCase_ , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
A : List[Any] = eval_dataset.map(
lowerCamelCase_ , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(lowerCamelCase_: List[Any] ):
# check that all files have the correct sampling rate
assert (
len(set(batch['''sampling_rate'''] ) ) == 1
), f"""Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."""
A : Dict = processor(
audio=batch['''speech'''] , text=batch['''target_text'''] , sampling_rate=batch['''sampling_rate'''][0] )
batch.update(lowerCamelCase_ )
return batch
A : Union[str, Any] = train_dataset.map(
lowerCamelCase_ , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=lowerCamelCase_ , num_proc=data_args.preprocessing_num_workers , )
A : Union[str, Any] = eval_dataset.map(
lowerCamelCase_ , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=lowerCamelCase_ , num_proc=data_args.preprocessing_num_workers , )
# Metric
A : Optional[int] = datasets.load_metric('''wer''' )
def compute_metrics(lowerCamelCase_: Dict ):
A : List[str] = pred.predictions
A : Union[str, Any] = np.argmax(lowerCamelCase_ , axis=-1 )
A : List[Any] = processor.tokenizer.pad_token_id
A : List[Any] = processor.batch_decode(lowerCamelCase_ )
# we do not want to group tokens when computing the metrics
A : Any = processor.batch_decode(pred.label_ids , group_tokens=lowerCamelCase_ )
A : Tuple = wer_metric.compute(predictions=lowerCamelCase_ , references=lowerCamelCase_ )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
A : Dict = DataCollatorCTCWithPadding(processor=lowerCamelCase_ , padding=lowerCamelCase_ )
# Initialize our Trainer
A : Any = CTCTrainer(
model=lowerCamelCase_ , data_collator=lowerCamelCase_ , args=lowerCamelCase_ , compute_metrics=lowerCamelCase_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
A : Optional[int] = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
A : Dict = model_args.model_name_or_path
else:
A : Any = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
A : Any = trainer.train(resume_from_checkpoint=lowerCamelCase_ )
trainer.save_model()
A : int = train_result.metrics
A : Any = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase_ )
)
A : Dict = min(lowerCamelCase_ , len(lowerCamelCase_ ) )
trainer.log_metrics('''train''' , lowerCamelCase_ )
trainer.save_metrics('''train''' , lowerCamelCase_ )
trainer.save_state()
# Evaluation
A : str = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
A : str = trainer.evaluate()
A : Optional[Any] = data_args.max_val_samples if data_args.max_val_samples is not None else len(lowerCamelCase_ )
A : str = min(lowerCamelCase_ , len(lowerCamelCase_ ) )
trainer.log_metrics('''eval''' , lowerCamelCase_ )
trainer.save_metrics('''eval''' , lowerCamelCase_ )
return results
if __name__ == "__main__":
    main()
| 256 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class _SCREAMING_SNAKE_CASE ( snake_case ):
lowerCamelCase_ = (CMStochasticIterativeScheduler,)
lowerCamelCase_ = 1_0
def _UpperCAmelCase ( self : Any , **snake_case_ : Tuple ):
"""simple docstring"""
A : str = {
'''num_train_timesteps''': 201,
'''sigma_min''': 0.0_02,
'''sigma_max''': 80.0,
}
config.update(**snake_case_ )
return config
def _UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
A : List[str] = 10
A : Dict = self.get_scheduler_config()
A : Optional[int] = self.scheduler_classes[0](**snake_case_ )
scheduler.set_timesteps(snake_case_ )
A : List[str] = scheduler.timesteps[0]
A : Any = scheduler.timesteps[1]
A : int = self.dummy_sample
A : str = 0.1 * sample
A : Tuple = scheduler.step(snake_case_ , snake_case_ , snake_case_ ).prev_sample
A : Tuple = scheduler.step(snake_case_ , snake_case_ , snake_case_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _UpperCAmelCase ( self : Dict ):
"""simple docstring"""
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=snake_case_ )
def _UpperCAmelCase ( self : Dict ):
"""simple docstring"""
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=snake_case_ )
def _UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
A : List[Any] = self.scheduler_classes[0]
A : Union[str, Any] = self.get_scheduler_config()
A : List[str] = scheduler_class(**snake_case_ )
A : str = 1
scheduler.set_timesteps(snake_case_ )
A : Optional[int] = scheduler.timesteps
A : int = torch.manual_seed(0 )
A : Optional[Any] = self.dummy_model()
A : int = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(snake_case_ ):
# 1. scale model input
A : Dict = scheduler.scale_model_input(snake_case_ , snake_case_ )
# 2. predict noise residual
A : List[Any] = model(snake_case_ , snake_case_ )
# 3. predict previous sample x_t-1
A : Union[str, Any] = scheduler.step(snake_case_ , snake_case_ , snake_case_ , generator=snake_case_ ).prev_sample
A : Union[str, Any] = pred_prev_sample
A : List[str] = torch.sum(torch.abs(snake_case_ ) )
A : int = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_sum.item() - 1_92.76_14 ) < 1E-2
assert abs(result_mean.item() - 0.25_10 ) < 1E-3
def _UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
A : Tuple = self.scheduler_classes[0]
A : Tuple = self.get_scheduler_config()
A : str = scheduler_class(**snake_case_ )
A : Optional[int] = [106, 0]
scheduler.set_timesteps(timesteps=snake_case_ )
A : Optional[int] = scheduler.timesteps
A : Any = torch.manual_seed(0 )
A : Tuple = self.dummy_model()
A : str = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
A : Tuple = scheduler.scale_model_input(snake_case_ , snake_case_ )
# 2. predict noise residual
A : str = model(snake_case_ , snake_case_ )
# 3. predict previous sample x_t-1
A : Any = scheduler.step(snake_case_ , snake_case_ , snake_case_ , generator=snake_case_ ).prev_sample
A : str = pred_prev_sample
A : str = torch.sum(torch.abs(snake_case_ ) )
A : Union[str, Any] = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_sum.item() - 3_47.63_57 ) < 1E-2
assert abs(result_mean.item() - 0.45_27 ) < 1E-3
def _UpperCAmelCase ( self : int ):
"""simple docstring"""
A : Optional[int] = self.scheduler_classes[0]
A : Optional[int] = self.get_scheduler_config()
A : Any = scheduler_class(**snake_case_ )
A : Union[str, Any] = [39, 30, 12, 15, 0]
with self.assertRaises(snake_case_ , msg='''`timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=snake_case_ )
def _UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
A : List[str] = self.scheduler_classes[0]
A : Dict = self.get_scheduler_config()
A : Tuple = scheduler_class(**snake_case_ )
A : Any = [39, 30, 12, 1, 0]
A : List[Any] = len(snake_case_ )
with self.assertRaises(snake_case_ , msg='''Can only pass one of `num_inference_steps` or `timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=snake_case_ , timesteps=snake_case_ )
def _UpperCAmelCase ( self : Dict ):
"""simple docstring"""
A : List[Any] = self.scheduler_classes[0]
A : str = self.get_scheduler_config()
A : List[Any] = scheduler_class(**snake_case_ )
A : Dict = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            snake_case_ , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
            scheduler.set_timesteps(timesteps=snake_case_ )
| 256 | 1 |
from __future__ import annotations
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ): # This function is recursive
'''simple docstring'''
lowerCAmelCase : List[Any] = len(SCREAMING_SNAKE_CASE__ )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
lowerCAmelCase : Tuple = array[0]
lowerCAmelCase : Union[str, Any] = False
lowerCAmelCase : int = 1
lowerCAmelCase : list[int] = []
while not is_found and i < array_length:
if array[i] < pivot:
lowerCAmelCase : Union[str, Any] = True
lowerCAmelCase : str = [element for element in array[i:] if element >= array[i]]
lowerCAmelCase : Optional[int] = longest_subsequence(SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > len(SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase : Tuple = temp_array
else:
i += 1
lowerCAmelCase : List[Any] = [element for element in array[1:] if element >= pivot]
lowerCAmelCase : List[str] = [pivot, *longest_subsequence(SCREAMING_SNAKE_CASE__ )]
if len(SCREAMING_SNAKE_CASE__ ) > len(SCREAMING_SNAKE_CASE__ ):
return temp_array
else:
return longest_subseq
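def _lis_length_demo(seq: list[int]) -> int:
    # Hedged cross-check (illustration only): a classic O(n^2) DP for the length of
    # the longest non-decreasing subsequence, which the recursion above searches for.
    best = [1] * len(seq)
    for j in range(len(seq)):
        for i in range(j):
            if seq[i] <= seq[j]:
                best[j] = max(best[j], best[i] + 1)
    return max(best, default=0)


# [10, 22, 33, 50, 60, 80] is one longest non-decreasing subsequence (length 6).
assert _lis_length_demo([10, 22, 9, 33, 21, 50, 41, 60, 80]) == 6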
if __name__ == "__main__":
import doctest
doctest.testmod()
| 693 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase : Union[str, Any] ={
'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'],
'tokenization_roformer': ['RoFormerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Dict =['RoFormerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[int] =[
'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoFormerForCausalLM',
'RoFormerForMaskedLM',
'RoFormerForMultipleChoice',
'RoFormerForQuestionAnswering',
'RoFormerForSequenceClassification',
'RoFormerForTokenClassification',
'RoFormerLayer',
'RoFormerModel',
'RoFormerPreTrainedModel',
'load_tf_weights_in_roformer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Tuple =[
'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRoFormerForCausalLM',
'TFRoFormerForMaskedLM',
'TFRoFormerForMultipleChoice',
'TFRoFormerForQuestionAnswering',
'TFRoFormerForSequenceClassification',
'TFRoFormerForTokenClassification',
'TFRoFormerLayer',
'TFRoFormerModel',
'TFRoFormerPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : int =[
'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxRoFormerForMaskedLM',
'FlaxRoFormerForMultipleChoice',
'FlaxRoFormerForQuestionAnswering',
'FlaxRoFormerForSequenceClassification',
'FlaxRoFormerForTokenClassification',
'FlaxRoFormerModel',
'FlaxRoFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase : Tuple =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
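# Hedged usage note (illustration only; the module path is assumed from the standard
# transformers layout): with the lazy structure above, importing a name only loads
# its backend on first access, e.g.
#
#     from transformers.models.roformer import RoFormerConfig  # no torch/tf needed
#     from transformers.models.roformer import RoFormerModel   # triggers the torch import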
| 693 | 1 |
def __lowercase( UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
while a != 0:
lowerCamelCase , lowerCamelCase = b % a, a
return b
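# Hedged sketch (illustration only): a self-contained restatement of the Euclidean
# loop above, plus a check against Python's built-in modular inverse (pow with a
# negative exponent, available since Python 3.8), which the function below targets.
def _gcd_demo(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


assert _gcd_demo(48, 18) == 6
assert pow(3, -1, 11) == 4  # 3 * 4 = 12 == 1 (mod 11)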
def __lowercase( UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
if gcd(UpperCAmelCase__ , UpperCAmelCase__ ) != 1:
lowerCamelCase = F"""mod inverse of {a!r} and {m!r} does not exist"""
raise ValueError(UpperCAmelCase__ )
lowerCamelCase , lowerCamelCase , lowerCamelCase = 1, 0, a
lowerCamelCase , lowerCamelCase , lowerCamelCase = 0, 1, m
while va != 0:
lowerCamelCase = ua // va
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
    return ua % m
| 623 |
import socket
def __lowercase( ):
"""simple docstring"""
lowerCamelCase = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
lowerCamelCase = socket.gethostname()
lowerCamelCase = 12312
sock.connect((host, port) )
sock.send(B"Hello server!" )
with open("Received_file" , "wb" ) as out_file:
print("File opened" )
print("Receiving data..." )
while True:
lowerCamelCase = sock.recv(1024 )
if not data:
break
out_file.write(UpperCAmelCase__ )
print("Successfully received the file" )
sock.close()
print("Connection closed" )
if __name__ == "__main__":
    main()
| 623 | 1 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCAmelCase = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class lowerCamelCase_ ( lowercase , unittest.TestCase ):
__lowercase : Dict = XLMRobertaTokenizer
__lowercase : List[Any] = XLMRobertaTokenizerFast
__lowercase : Dict = True
__lowercase : Union[str, Any] = True
def lowercase ( self ) -> List[str]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCamelCase = XLMRobertaTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase ( self ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = "<pad>"
_UpperCamelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ) , lowerCamelCase_ )
def lowercase ( self ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(lowerCamelCase_ ) , 10_02 )
def lowercase ( self ) -> int:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 10_02 )
def lowercase ( self ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = XLMRobertaTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ )
_UpperCamelCase = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowerCamelCase_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
_UpperCamelCase = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowerCamelCase_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
_UpperCamelCase = tokenizer.convert_tokens_to_ids(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
def lowercase ( self ) -> str:
"""simple docstring"""
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
_UpperCamelCase = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCamelCase = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
_UpperCamelCase = self.tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = tokenizer_r.save_pretrained(lowerCamelCase_ )
_UpperCamelCase = tokenizer_p.save_pretrained(lowerCamelCase_ )
                # Checks it saves with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
_UpperCamelCase = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ )
# Checks everything loads correctly in the same way
_UpperCamelCase = tokenizer_r.from_pretrained(lowerCamelCase_ )
_UpperCamelCase = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCamelCase_ )
# Save tokenizer rust, legacy_format=True
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ )
_UpperCamelCase = tokenizer_p.save_pretrained(lowerCamelCase_ )
                # Checks it saves with the same files
self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ )
# Checks everything loads correctly in the same way
_UpperCamelCase = tokenizer_r.from_pretrained(lowerCamelCase_ )
_UpperCamelCase = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
shutil.rmtree(lowerCamelCase_ )
# Save tokenizer rust, legacy_format=False
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ )
_UpperCamelCase = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_UpperCamelCase = tokenizer_r.from_pretrained(lowerCamelCase_ )
_UpperCamelCase = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
shutil.rmtree(lowerCamelCase_ )
@cached_property
def lowercase ( self ) -> str:
"""simple docstring"""
return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base" )
def lowercase ( self ) -> str:
"""simple docstring"""
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowerCamelCase_ , f.name )
_UpperCamelCase = XLMRobertaTokenizer(f.name , keep_accents=lowerCamelCase_ )
_UpperCamelCase = pickle.dumps(lowerCamelCase_ )
pickle.loads(lowerCamelCase_ )
def lowercase ( self ) -> Optional[Any]:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = self.get_rust_tokenizer()
_UpperCamelCase = "I was born in 92000, and this is falsé."
_UpperCamelCase = tokenizer.tokenize(lowerCamelCase_ )
_UpperCamelCase = rust_tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
_UpperCamelCase = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
_UpperCamelCase = rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
_UpperCamelCase = self.get_rust_tokenizer()
_UpperCamelCase = tokenizer.encode(lowerCamelCase_ )
_UpperCamelCase = rust_tokenizer.encode(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
@slow
def lowercase ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = "Hello World!"
_UpperCamelCase = [0, 3_53_78, 66_61, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(lowerCamelCase_ , self.big_tokenizer.encode(lowerCamelCase_ ) )
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [
            0, 3293, 83, 10, 4552, 4989, 7986, 678, 10, 5915, 111, 179459, 124850, 4, 6044, 237, 12, 6, 5, 6, 4,
            6780, 705, 15, 1388, 44, 378, 10114, 711, 152, 20, 6, 5, 22376, 642, 1221, 15190, 34153, 450, 5608,
            959, 1119, 57702, 136, 186, 47, 1098, 29367, 47,
            # 4426,  # What fairseq tokenizes from "<unk>": "_<"
            # 3678,  # What fairseq tokenizes from "<unk>": "unk"
            # 2740,  # What fairseq tokenizes from "<unk>": ">"
            3,  # What we tokenize from "<unk>": "<unk>"
            6,  # Residue from the tokenization: an extra sentencepiece underline
            4, 6044, 237, 6284, 50901, 528, 31, 90, 34, 927, 2,
        ]
        # fmt: on
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
_UpperCamelCase = {"input_ids": [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        # `_UpperCamelCase` above holds the pinned reference encoding for this checkpoint revision.
        self.tokenizer_integration_test_util(
            expected_encoding=_UpperCamelCase,
            model_name="xlm-roberta-base",
            revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3",
        )
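
# Standalone usage sketch (not part of the test class above): round-trip a sentence
# through the slow and fast tokenizers and check they agree, mirroring
# test_rust_and_python_full_tokenizers. Assumes network access to the
# "xlm-roberta-base" checkpoint; run it manually rather than under pytest.
if __name__ == "__main__":
    from transformers import XLMRobertaTokenizer, XLMRobertaTokenizerFast

    slow_tok = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
    fast_tok = XLMRobertaTokenizerFast.from_pretrained("xlm-roberta-base")
    text = "I was born in 92000, and this is falsé."
    assert slow_tok.encode(text) == fast_tok.encode(text)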
| 706 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class DecoderOutput(BaseOutput):
    """Output of a decoding step: the decoded `sample` tensor."""

    sample: torch.FloatTensor
class Encoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down: gradient checkpointing trades compute for memory during training
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
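
def _encoder_smoke_test():
    # Hedged sanity sketch (not from diffusers): a single-block encoder maps a
    # (B, 3, H, W) image to 2 * out_channels "moments" when double_z=True, and the
    # final block adds no downsample, so the spatial size is preserved here.
    enc = Encoder(in_channels=3, out_channels=4, block_out_channels=(32,), layers_per_block=1)
    moments = enc(torch.randn(1, 3, 16, 16))
    assert moments.shape == (1, 8, 16, 16)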
class Decoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",  # "group" or "spatial"
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample, latent_embeds)
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
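
def _decoder_smoke_test():
    # Hedged sketch mirroring _encoder_smoke_test above: latents back to image space
    # with a single up block (the final block, so no upsample is applied).
    dec = Decoder(in_channels=4, out_channels=3, block_out_channels=(32,), layers_per_block=1)
    image = dec(torch.randn(1, 4, 16, 16))
    assert image.shape == (1, 3, 16, 16)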
class VectorQuantizer(nn.Module):
    """
    Discretization bottleneck of the VQ-VAE: maps continuous latents to their nearest
    codebook entries, with a straight-through gradient estimator.
    """

    def __init__(self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients (straight-through estimator)
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        # shape specifies (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q
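
def _vq_smoke_test():
    # Hedged usage sketch (not from diffusers): quantize a random feature map and
    # confirm the straight-through estimator preserves the input shape and lets
    # gradients reach z through the commitment loss.
    vq = VectorQuantizer(n_e=512, vq_embed_dim=4, beta=0.25)
    z = torch.randn(2, 4, 8, 8, requires_grad=True)
    z_q, loss, _ = vq(z)
    assert z_q.shape == z.shape
    loss.backward()
    assert z.grad is not None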
class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator=None) -> torch.FloatTensor:
        # make sure the sample is on the same device and dtype as the parameters
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
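
def _gaussian_posterior_smoke_test():
    # Hedged sketch: an 8-channel "moments" tensor splits into 4-channel mean/logvar,
    # sample() draws a reparameterized latent, and kl() scores it against N(0, I).
    moments = torch.randn(1, 8, 4, 4)
    posterior = DiagonalGaussianDistribution(moments)
    latents = posterior.sample()
    assert latents.shape == (1, 4, 4, 4)
    assert posterior.kl().shape == (1,)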
| 589 | 0 |
def count_inversions_bf(arr):
    """Count inversions in O(n^2) by checking every pair (brute force)."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    """Count inversions in O(n log n) via divide and conquer.

    Returns a tuple (sorted_arr, num_inversions).
    """
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversions_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)
    num_inversions = inversions_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    """Merge two sorted lists, counting inversions that cross the split point."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i <= k < len(P).
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
if __name__ == "__main__":
main()
| 55 |
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None) -> None:
    """Download a translation dataset with `datasets` and dump it to the
    train/val/test `.source` / `.target` layout expected by seq2seq finetuning."""
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)

    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")

        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split

        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        src_fp = src_path.open("w+")
        tgt_fp = tgt_path.open("w+")

        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")

    print(f"Saved {dataset} dataset to {save_dir}")
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
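# Example invocation (the script filename is hypothetical; requires `pip install fire datasets tqdm`):
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en
# This writes train/val/test `.source` and `.target` files under wmt16-ro-en/.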
| 565 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 708 |
import os
import pytest
from attr import dataclass
a ="""us-east-1""" # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}

    @property
    def metric_definitions(self):
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self) -> str:
        return f"{self.framework}-transformers-test"

    @property
    def test_path(self) -> str:
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self) -> str:
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"


@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
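
# Hedged usage sketch: a test class opts in by requesting the fixture above and
# declaring the framework it targets, e.g.
#
#   @pytest.mark.usefixtures("sm_env")
#   class TestSingleNodeTraining:
#       framework = "pytorch"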
| 337 | 0 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"} | 34 |
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    """Read a boolean flag from the environment, falling back to `default`."""
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_SLOW', default=False)
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_REMOTE', default=False)
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_LOCAL', default=True)
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_PACKAGED', default=True)
# Compression
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')
# Audio
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'),
reason='test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ',
)
# Beam
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
reason='test requires apache-beam and a compatible dill version',
)
# Dill-cloudpickle compatibility
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('0.3.2'),
reason='test requires dill>0.3.2 for cloudpickle compatibility',
)
# Windows
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
sys.platform == 'win32',
reason='test should not be run on Windows',
)
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import faiss # noqa
except ImportError:
UpperCamelCase = unittest.skip('''test requires faiss''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import regex # noqa
except ImportError:
UpperCamelCase = unittest.skip('''test requires regex''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import elasticsearch # noqa
except ImportError:
UpperCamelCase = unittest.skip('''test requires elasticsearch''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import sqlalchemy # noqa
except ImportError:
UpperCamelCase = unittest.skip('''test requires sqlalchemy''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not config.TORCH_AVAILABLE:
UpperCamelCase = unittest.skip('''test requires PyTorch''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not config.TF_AVAILABLE:
UpperCamelCase = unittest.skip('''test requires TensorFlow''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not config.JAX_AVAILABLE:
UpperCamelCase = unittest.skip('''test requires JAX''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not config.PIL_AVAILABLE:
UpperCamelCase = unittest.skip('''test requires Pillow''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('''test requires transformers''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('''test requires tiktoken''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('''test requires spacy''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
def _require_spacy_model(_lowercase ):
try:
import spacy # noqa F401
spacy.load(_lowercase )
except ImportError:
return unittest.skip('''test requires spacy''' )(_lowercase )
except OSError:
return unittest.skip('''test requires spacy model \'{}\''''.format(_lowercase ) )(_lowercase )
else:
return test_case
return _require_spacy_model
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('''test requires pyspark''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('''test requires joblibspark''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not _run_slow_tests or _run_slow_tests == 0:
UpperCamelCase = unittest.skip('''test is slow''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not _run_local_tests or _run_local_tests == 0:
UpperCamelCase = unittest.skip('''test is local''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not _run_packaged_tests or _run_packaged_tests == 0:
UpperCamelCase = unittest.skip('''test is packaged''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not _run_remote_tests or _run_remote_tests == 0:
UpperCamelCase = unittest.skip('''test requires remote''' )(_lowercase )
return test_case
def __snake_case ( *_lowercase ):
"""simple docstring"""
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(_lowercase ) and name.startswith('''test''' ):
for decorator in decorators:
UpperCamelCase = decorator(_lowercase )
setattr(cls ,_lowercase ,_lowercase )
return cls
return decorate
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    """Simulate an offline environment for the duration of the context."""
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
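
def _offline_smoke_test():
    # Hedged example of the context manager above: every requests call made while
    # CONNECTION_FAILS is simulated should raise ConnectionError immediately.
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.get("https://huggingface.co")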
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
return deepcopy(_lowercase ).integers(0 ,100 ,10 ).tolist() == deepcopy(_lowercase ).integers(0 ,100 ,10 ).tolist()
def __snake_case ( _lowercase ):
"""simple docstring"""
import decorator
from requests.exceptions import HTTPError
def _wrapper(_lowercase ,*_lowercase ,**_lowercase ):
try:
return func(*_lowercase ,**_lowercase )
except HTTPError as err:
if str(_lowercase ).startswith('''500''' ) or str(_lowercase ).startswith('''502''' ):
pytest.xfail(str(_lowercase ) )
raise err
return decorator.decorator(_wrapper ,_lowercase )
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda x: tee(x, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda x: tee(x, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result
def pytest_xdist_worker_id():
    """Return the numeric id of the current pytest-xdist worker ("gw0" -> 0; 0 if not distributed)."""
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    """Derive a unique port per xdist worker so concurrent torch.distributed tests don't collide."""
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta | 34 | 1 |
"""simple docstring"""
def _lowerCamelCase ( lowerCamelCase__ : list ):
lowercase__ : Dict = 0
while len(lowerCamelCase__ ) > 1:
lowercase__ : int = 0
# Consider two files with minimum cost to be merged
for _ in range(2 ):
lowercase__ : Dict = files.index(min(lowerCamelCase__ ) )
temp += files[min_index]
files.pop(lowerCamelCase__ )
files.append(lowerCamelCase__ )
optimal_merge_cost += temp
return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod() | 128 |
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
__snake_case = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'
__snake_case = '\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {\'spearmanr\': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results[\'spearmanr\'])\n -0.7\n >>> print(round(results[\'spearmanr_pvalue\'], 2))\n 0.19\n'
__snake_case = R'\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    """Spearman rank-order correlation metric, wrapping scipy.stats.spearmanr."""

    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""] , )
    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]} | 128 | 1 |
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 611 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
"merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"ctrl": 2_5_6,
}
CONTROL_CODES = {
"Pregnancy": 1_6_8_6_2_9,
"Christianity": 7_6_7_5,
"Explain": 1_0_6_4_2_3,
"Fitness": 6_3_4_4_0,
"Saving": 6_3_1_6_3,
"Ask": 2_7_1_7_1,
"Ass": 9_5_9_8_5,
"Joke": 1_6_3_5_0_9,
"Questions": 4_5_6_2_2,
"Thoughts": 4_9_6_0_5,
"Retail": 5_2_3_4_2,
"Feminism": 1_6_4_3_3_8,
"Writing": 1_1_9_9_2,
"Atheism": 1_9_2_2_6_3,
"Netflix": 4_8_6_1_6,
"Computing": 3_9_6_3_9,
"Opinion": 4_3_2_1_3,
"Alone": 4_4_9_6_7,
"Funny": 5_8_9_1_7,
"Gaming": 4_0_3_5_8,
"Human": 4_0_8_8,
"India": 1_3_3_1,
"Joker": 7_7_1_3_8,
"Diet": 3_6_2_0_6,
"Legal": 1_1_8_5_9,
"Norman": 4_9_3_9,
"Tip": 7_2_6_8_9,
"Weight": 5_2_3_4_3,
"Movies": 4_6_2_7_3,
"Running": 2_3_4_2_5,
"Science": 2_0_9_0,
"Horror": 3_7_7_9_3,
"Confession": 6_0_5_7_2,
"Finance": 1_2_2_5_0,
"Politics": 1_6_3_6_0,
"Scary": 1_9_1_9_8_5,
"Support": 1_2_6_5_4,
"Technologies": 3_2_5_1_6,
"Teenage": 6_6_1_6_0,
"Event": 3_2_7_6_9,
"Learned": 6_7_4_6_0,
"Notion": 1_8_2_7_7_0,
"Wikipedia": 3_7_5_8_3,
"Books": 6_6_6_5,
"Extract": 7_6_0_5_0,
"Confessions": 1_0_2_7_0_1,
"Conspiracy": 7_5_9_3_2,
"Links": 6_3_6_7_4,
"Narcissus": 1_5_0_4_2_5,
"Relationship": 5_4_7_6_6,
"Relationships": 1_3_4_7_9_6,
"Reviews": 4_1_6_7_1,
"News": 4_2_5_6,
"Translation": 2_6_8_2_0,
"multilingual": 1_2_8_4_0_6,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
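
def _bpe_pairs_demo():
    # Hedged demo of the helper above: the adjacent-symbol pairs it returns are what
    # bpe() repeatedly merges, lowest merge rank first.
    word = ("l", "o", "w", "er</w>")
    assert get_pairs(word) == {("l", "o"), ("o", "w"), ("w", "er</w>")}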
class CTRLTokenizer(PreTrainedTokenizer):
    """Construct a CTRL tokenizer, based on Byte-Pair-Encoding."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair merges to a single token, lowest-ranked merge first."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string, then apply BPE to each whitespace-separated word."""
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) back to a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
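# Hedged usage sketch (requires real vocab.json / merges.txt files on disk; paths are
# hypothetical):
#   tok = CTRLTokenizer(vocab_file="vocab.json", merges_file="merges.txt")
#   tokens = tok._tokenize("Diet Today I ate a salad")  # first word may be a control code
#   text = tok.convert_tokens_to_string(tokens)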
| 611 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
"MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileViTForImageClassification",
"MobileViTForSemanticSegmentation",
"MobileViTModel",
"MobileViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
"TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileViTForImageClassification",
"TFMobileViTForSemanticSegmentation",
"TFMobileViTModel",
"TFMobileViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 293 |
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
"Assert",
"AssignVariableOp",
"EmptyTensorList",
"MergeV2Checkpoints",
"ReadVariableOp",
"ResourceGather",
"RestoreV2",
"SaveV2",
"ShardedFilename",
"StatefulPartitionedCall",
"StaticRegexFullMatch",
"VarHandleOp",
]
def onnx_compliancy(saved_model_path, strict, opset):
    """Check that every op in a TF SavedModel is ONNX-exportable for the given opset."""
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(
            f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops)
        )
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
parser.add_argument(
"--opset", default=1_2, type=int, help="The ONNX opset against which the model has to be tested."
)
parser.add_argument(
"--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
)
parser.add_argument(
"--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
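# Example invocation (paths are hypothetical), run from the repo root:
#   python utils/check_tf_ops.py --saved_model_path my_model/saved_model.pb --opset 12 --strict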
| 293 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_x_clip': [
'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XCLIPConfig',
'XCLIPTextConfig',
'XCLIPVisionConfig',
],
'processing_x_clip': ['XCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'XCLIPModel',
'XCLIPPreTrainedModel',
'XCLIPTextModel',
'XCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 57 |
"""simple docstring"""
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    """Output of the scheduler's `step`: the next sample and the predicted denoised sample."""

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Build a beta schedule from a cumulative-alpha function (cosine or exp)."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class snake_case_ ( _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_: List[Any] = 1
@register_to_config
def __init__( self , __a = 1000 , __a = 0.0001 , __a = 0.02 , __a = "linear" , __a = None , __a = True , __a = True , __a = 0 , __a = "epsilon" , __a = 1.0 , **__a , ):
"""simple docstring"""
if kwargs.get('set_alpha_to_one' , __a ) is not None:
A__ = (
'The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'
)
deprecate('set_alpha_to_one' , '1.0.0' , __a , standard_warn=__a )
A__ = kwargs['set_alpha_to_one']
if trained_betas is not None:
A__ = torch.tensor(__a , dtype=torch.floataa )
elif beta_schedule == "linear":
A__ = torch.linspace(__a , __a , __a , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
A__ = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , __a , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
A__ = betas_for_alpha_bar(__a )
else:
raise NotImplementedError(f'''{beta_schedule} does is not implemented for {self.__class__}''' )
A__ = 1.0 - self.betas
A__ = torch.cumprod(self.alphas , dim=0 )
# At every step in inverted ddim, we are looking into the next alphas_cumprod
# For the final step, there is no next alphas_cumprod, and the index is out of bounds
# `set_alpha_to_zero` decides whether we set this parameter simply to zero
# in this case, self.step() just output the predicted noise
# or whether we use the final alpha of the "non-previous" one.
A__ = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
A__ = 1.0
# setable values
A__ = None
A__ = torch.from_numpy(np.arange(0 , __a ).copy().astype(np.intaa ) )
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """No-op, kept for interchangeability with schedulers that scale the model input."""
        return sample

    def set_timesteps(self, num_inference_steps: int, device=None):
        """Sets the discrete timesteps used for the diffusion chain (run before inference)."""
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ) -> Union[DDIMSchedulerOutput, Tuple]:
        # 1. get the next timestep value (inversion walks forward in noise level)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )
        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self):
        return self.config.num_train_timesteps
| 260 | 0 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)
        return AudioPipelineOutput(audios=audio)
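
# Illustrative usage sketch (assumption: a published dance-diffusion checkpoint id):
#
#   pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#   output = pipe(num_inference_steps=100, audio_length_in_s=4.0)
#   waveform = output.audios[0]  # numpy array of shape (channels, samples)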
| 668 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_: Union[str, Any] = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Union[str, Any] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
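
# Usage note (illustrative, not part of the original module): with the lazy structure
# above, `import transformers` stays cheap; the torch/TF/Flax modules are imported only
# on first attribute access, e.g.:
#
#   from transformers import DistilBertModel  # triggers the torch branch above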
| 668 | 1 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)


@dataclass
class InputExample:
    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]

            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features
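
# Minimal sketch of the expected input/output (hypothetical toy example and tokenizer,
# not part of the original file):
#
#   example = InputExample(guid="train-1", words=["Hugging", "Face"], labels=["B-ORG", "I-ORG"])
#   feats = TokenClassificationTask.convert_examples_to_features(
#       [example], ["O", "B-ORG", "I-ORG"], max_seq_length=8, tokenizer=tokenizer
#   )  # feats[0].label_ids pads sub-word positions with pad_token_label_id (-100)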
if is_torch_available():
    import torch
    from torch import nn
    from torch.utils.data import Dataset

    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),
            )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples,
                        labels,
                        max_seq_length,
                        tokenizer,
                        cls_token_at_end=bool(model_type in ["xlnet"]),
                        cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                        sep_token=tokenizer.sep_token,
                        sep_token_extra=False,
                        pad_on_left=bool(tokenizer.padding_side == "left"),
                        pad_token=tokenizer.pad_token_id,
                        pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
if is_tf_available():
    import tensorflow as tf

    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.
        pad_token_label_id: int = -100

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples,
                labels,
                max_seq_length,
                tokenizer,
                cls_token_at_end=bool(model_type in ["xlnet"]),
                cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                sep_token=tokenizer.sep_token,
                sep_token_extra=False,
                pad_on_left=bool(tokenizer.padding_side == "left"),
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,
                pad_token_label_id=self.pad_token_label_id,
            )

            def gen():
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                    (
                        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),
                )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
                    (
                        {
                            "input_ids": tf.TensorShape([None]),
                            "attention_mask": tf.TensorShape([None]),
                            "token_type_ids": tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),
                )

        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
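
# Illustrative construction of the torch dataset (hypothetical `NER` task subclass and
# tokenizer; not part of the original file):
#
#   dataset = TokenClassificationDataset(
#       token_classification_task=NER(), data_dir="./data", tokenizer=tokenizer,
#       labels=["O", "B-ORG", "I-ORG"], model_type="bert", max_seq_length=128,
#   )
#   input_ids = dataset[0].input_ids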
| 96 |
'''simple docstring'''
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
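
# Quick sanity check (illustrative): comment-only lines are stripped before hashing,
# so they do not change the digest.
#
#   assert _hash_python_lines(["x = 1", "# comment"]) == _hash_python_lines(["x = 1"])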
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
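
# Illustrative lookup (not part of the original module): infer the packaged builder for
# a data file from its extension.
#
#   module_name, default_builder_kwargs = _EXTENSION_TO_MODULE[".tsv"]
#   # -> ("csv", {"sep": "\t"})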
| 50 | 0 |
"""simple docstring"""
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
__a = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
__a = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
raise Exception("requires fairseq >= 0.9.0")
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
__a = " Hello world! cécé herlolip"
__a = [
("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"),
("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"),
("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"),
("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"),
]
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def load_xsum_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    """Copy/paste/tweak a fairseq BART checkpoint into the Hugging Face structure."""
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)

    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}"
        )

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
)
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--hf_config", default=None, type=str, help="Which huggingface architecture to use: bart-large-xsum"
)
    args = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
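
# Example invocation (illustrative; paths are placeholders, script name assumed):
#
#   python convert_bart_original_pytorch_checkpoint_to_pytorch.py bart.large.cnn \
#       ./bart-large-cnn --hf_config facebook/bart-large-cnn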
| 706 |
"""simple docstring"""
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
("/", "."),
("layer_", "layers."),
("kernel", "weight"),
("beta", "bias"),
("gamma", "weight"),
("pegasus", "model"),
]
END_COMMON = [
(".output.dense", ".fc2"),
("intermediate.LayerNorm", "final_layer_norm"),
("intermediate.dense", "fc1"),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.out_proj"),
("attention.self", "self_attn"),
("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
("attention.encdec_output.dense", "encoder_attn.out_proj"),
("attention.encdec", "encoder_attn"),
("key", "k_proj"),
("value", "v_proj"),
("query", "q_proj"),
("decoder.LayerNorm", "decoder.layernorm_embedding"),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
("embeddings.word_embeddings", "shared.weight"),
("embeddings.position_embeddings", "embed_positions.weight"),
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.output"),
("attention.self", "self_attn.self"),
("encoder.LayerNorm", "encoder.layernorm_embedding"),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
"encdec/key/bias",
"encdec/query/bias",
"encdec/value/bias",
"self/key/bias",
"self/query/bias",
"self/value/bias",
"encdec_output/dense/bias",
"attention/output/dense/bias",
]
def rename_state_dict_key(k, patterns):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path) -> dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
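
# Example invocation (illustrative; the checkpoint path is a placeholder, script name assumed):
#
#   python convert_bigbird_pegasus_tf_to_pytorch.py \
#       --tf_ckpt_path ./bigbird-pegasus-large-arxiv/model.ckpt --save_dir ./hf_model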
| 310 | 0 |
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
"""b0""": {
"""hidden_dim""": 1280,
"""width_coef""": 1.0,
"""depth_coef""": 1.0,
"""image_size""": 224,
"""dropout_rate""": 0.2,
"""dw_padding""": [],
},
"""b1""": {
"""hidden_dim""": 1280,
"""width_coef""": 1.0,
"""depth_coef""": 1.1,
"""image_size""": 240,
"""dropout_rate""": 0.2,
"""dw_padding""": [16],
},
"""b2""": {
"""hidden_dim""": 1408,
"""width_coef""": 1.1,
"""depth_coef""": 1.2,
"""image_size""": 260,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 8, 16],
},
"""b3""": {
"""hidden_dim""": 1536,
"""width_coef""": 1.2,
"""depth_coef""": 1.4,
"""image_size""": 300,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 18],
},
"""b4""": {
"""hidden_dim""": 1792,
"""width_coef""": 1.4,
"""depth_coef""": 1.8,
"""image_size""": 380,
"""dropout_rate""": 0.4,
"""dw_padding""": [6],
},
"""b5""": {
"""hidden_dim""": 2048,
"""width_coef""": 1.6,
"""depth_coef""": 2.2,
"""image_size""": 456,
"""dropout_rate""": 0.4,
"""dw_padding""": [13, 27],
},
"""b6""": {
"""hidden_dim""": 2304,
"""width_coef""": 1.8,
"""depth_coef""": 2.6,
"""image_size""": 528,
"""dropout_rate""": 0.5,
"""dw_padding""": [31],
},
"""b7""": {
"""hidden_dim""": 2560,
"""width_coef""": 2.0,
"""depth_coef""": 3.1,
"""image_size""": 600,
"""dropout_rate""": 0.5,
"""dw_padding""": [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') )
rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') )
rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') )
rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') )
rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') )
rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') )
rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') )
rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') )
rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    """Copy/paste/tweak the Keras EfficientNet weights into the Hugging Face structure."""
    # Load the original Keras model
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
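
# Example invocation (illustrative):
#
#   python convert_efficientnet_to_pytorch.py --model_name b0 \
#       --pytorch_dump_folder_path hf_model --save_model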
| 467 |
"""simple docstring"""
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """
    Newton-Laplace formula: c = sqrt(K / rho), where K is the fluid's bulk
    modulus (Pa) and rho its density (kg/m^3).

    >>> round(speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9), 1)
    1467.8
    """
    if density <= 0:
        raise ValueError('Impossible fluid density')
    if bulk_modulus <= 0:
        raise ValueError('Impossible bulk modulus')

    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
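
# Worked example (illustrative): sea water at roughly density=1025 kg/m^3 and
# bulk_modulus=2.34e9 Pa gives c = sqrt(2.34e9 / 1025) ~= 1511 m/s.
#
#   print(speed_of_sound_in_a_fluid(density=1025, bulk_modulus=2.34e9))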
| 650 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
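
# Run note (illustrative; the test-file path is an assumption): these tests are
# exercised with pytest, e.g.
#
#   python -m pytest tests/models/poolformer/test_image_processing_poolformer.py -q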
| 702 |
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge"""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
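
# Example invocation via python-fire (illustrative; file names are placeholders):
#
#   python rouge_cli.py preds.txt targets.txt --save_path rouge.json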
| 376 | 0 |
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()


DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b"""\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"""
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, """sentencepiece_model_pb2""", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
_a : str = None
_a : Union[str, Any] = b"""H\003"""
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
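# --- Illustrative usage (added sketch, not part of the generated file) ---
# A minimal sketch of reading a serialized SentencePiece model with the classes
# built above. Assumes this module is importable as `sentencepiece_model_pb2`
# and that a SentencePiece model file exists at the hypothetical path below.
#
#     import sentencepiece_model_pb2
#
#     m = sentencepiece_model_pb2.ModelProto()
#     with open("tokenizer.model", "rb") as f:
#         m.ParseFromString(f.read())  # standard protobuf deserialization
#     print(m.trainer_spec.model_type)  # e.g. 1 == UNIGRAM per the enum above
#     print(m.trainer_spec.vocab_size)  # default 8000 per the descriptor
#     print(m.pieces[0].piece, m.pieces[0].score)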
| 689 |
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
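# Sketch: how an additional subcommand plugs into the dispatch above. The names
# are illustrative; only the `register_subcommand` / `args.func(args).run()`
# contract is taken from the code above.
class HelloCommand:
    @staticmethod
    def register_subcommand(commands_parser):
        cmd = commands_parser.add_parser("hello")  # would enable `diffusers-cli hello`
        cmd.set_defaults(func=HelloCommand)  # main() looks up args.func

    def __init__(self, args):
        self.args = args

    def run(self):
        print("hello from a custom subcommand")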
| 689 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pndm = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=pndm,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
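# Sketch: the peak-memory measurement pattern from the last test, isolated so it
# can be reused with any GPU workload (assumes a CUDA device is available;
# `run_pipeline` stands in for an arbitrary callable).
def measure_peak_memory(run_pipeline) -> int:
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
    run_pipeline()
    return torch.cuda.max_memory_allocated()  # bytes at the high-water mark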
| 450 |
from __future__ import annotations

import typing
from collections.abc import Iterable

import numpy as np

Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Vectorised Euclidean distance via numpy."""
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Pure-Python Euclidean distance."""
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])",
                number=10000,
                globals=globals(),
            )
        )
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])",
                number=10000,
                globals=globals(),
            )
        )

    benchmark()
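# Sketch: a quick agreement check between the two implementations
# (illustrative values; call it from the __main__ block above if desired).
def check_agreement() -> None:
    assert np.isclose(euclidean_distance([0, 0], [3, 4]), 5.0)  # classic 3-4-5 triangle
    assert np.isclose(
        euclidean_distance([1, 2, 3], [4, 5, 6]),
        euclidean_distance_no_np([1, 2, 3], [4, 5, 6]),
    )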
| 450 | 1 |
'''simple docstring'''
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip
__SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")
def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token", "bart"],
        type=str,
        help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name",
        default=None,
        choices=["exact", "compressed", "legacy"],
        type=str,
        help="RAG model retriever type",
    )
    parser.add_argument("--index_path", default=None, type=str, help="Path to the retrieval index")
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode",
        choices=["e2e", "retrieval"],
        default="e2e",
        type=str,
        help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set", default=None, type=str, required=True, help="Path to a file containing evaluation samples"
    )
    parser.add_argument(
        "--gold_data_path", default=None, type=str, required=True, help="Path to a tab-separated file with gold samples"
    )
    parser.add_argument(
        "--gold_data_mode",
        default="qa",
        type=str,
        choices=["qa", "ans"],
        help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ),
    )
    parser.add_argument(
        "--predictions_path",
        type=str,
        default="predictions.txt",
        help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument("--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument(
        "--recalculate", help="Recalculate predictions even if the prediction file exists", action="store_true"
    )
    parser.add_argument("--num_beams", default=4, type=int, help="Number of beams to be used when generating answers")
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions", action="store_true", help="If True, prints predictions while evaluating."
    )
    parser.add_argument("--print_docs", action="store_true", help="If True, prints docs retried while generating.")
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) )
            score_fn(args, args.predictions_path, args.gold_data_path)
continue
        logger.info("***** Running evaluation for {} *****".format(checkpoint))
logger.info(''' Batch size = %d''' , args.eval_batch_size )
logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path ) )
        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
    args = get_args()
main(args)
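# Sketch: precision@k as computed by get_precision_at_k above, on toy
# tab-separated provenance strings (illustrative values, not a real run).
def precision_at_k_demo() -> None:
    k = 2
    hypos = ["Paris\tLyon", "Rome\tMilan"]
    references = ["Paris\tMarseille", "Naples\tTurin"]
    em = 0.0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])  # top-k retrieved titles
        ref_provenance = set(reference.split("\t"))  # gold titles
        em += len(hypo_provenance & ref_provenance) / k
    print(100.0 * em / len(hypos))  # 25.0: one hit out of four retrieved titles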
| 688 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : Optional[int] ) -> int:
'''simple docstring'''
a__ : int = 0
def __lowerCAmelCase ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
a__ : Optional[int] = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(A__ , A__ )
def __lowerCAmelCase ( self : Dict ) -> int:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : List[Any] = Path(A__ ) / '''preprocessor_config.json'''
a__ : List[Any] = Path(A__ ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(A__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(A__ , '''w''' ) )
a__ : Any = AutoImageProcessor.from_pretrained(A__ )
self.assertIsInstance(A__ , A__ )
def __lowerCAmelCase ( self : str ) -> Union[str, Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : int = Path(A__ ) / '''preprocessor_config.json'''
a__ : Optional[Any] = Path(A__ ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(A__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(A__ , '''w''' ) )
a__ : Tuple = AutoImageProcessor.from_pretrained(A__ )
self.assertIsInstance(A__ , A__ )
def __lowerCAmelCase ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : Dict = CLIPConfig()
            # Create a dummy config file with image_processor_type
a__ : int = Path(A__ ) / '''preprocessor_config.json'''
a__ : Optional[int] = Path(A__ ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(A__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(A__ , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
a__ : List[Any] = AutoImageProcessor.from_pretrained(A__ ).to_dict()
config_dict.pop('''image_processor_type''' )
a__ : Union[str, Any] = CLIPImageProcessor(**A__ )
# save in new folder
model_config.save_pretrained(A__ )
config.save_pretrained(A__ )
a__ : Union[str, Any] = AutoImageProcessor.from_pretrained(A__ )
# make sure private variable is not incorrectly saved
a__ : Optional[Any] = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(A__ , A__ )
def __lowerCAmelCase ( self : str ) -> List[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : Optional[int] = Path(A__ ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(A__ , '''w''' ) , )
a__ : Any = AutoImageProcessor.from_pretrained(A__ )
self.assertIsInstance(A__ , A__ )
def __lowerCAmelCase ( self : str ) -> Optional[Any]:
'''simple docstring'''
with self.assertRaisesRegex(
A__ , '''clip-base is not a local folder and is not a valid model identifier''' ):
a__ : str = AutoImageProcessor.from_pretrained('''clip-base''' )
def __lowerCAmelCase ( self : Optional[Any] ) -> int:
'''simple docstring'''
with self.assertRaisesRegex(
A__ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
a__ : Tuple = AutoImageProcessor.from_pretrained(A__ , revision='''aaaaaa''' )
def __lowerCAmelCase ( self : str ) -> List[Any]:
'''simple docstring'''
with self.assertRaisesRegex(
A__ , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
a__ : Union[str, Any] = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def __lowerCAmelCase ( self : List[Any] ) -> Tuple:
'''simple docstring'''
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(A__ )
a__ : str = AutoImageProcessor.from_pretrained(A__ , trust_remote_code=A__ )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def __lowerCAmelCase ( self : List[Any] ) -> Dict:
'''simple docstring'''
try:
AutoConfig.register('''custom''' , A__ )
AutoImageProcessor.register(A__ , A__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(A__ ):
AutoImageProcessor.register(A__ , A__ )
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : Optional[int] = Path(A__ ) / '''preprocessor_config.json'''
a__ : List[str] = Path(A__ ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(A__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(A__ , '''w''' ) )
a__ : Tuple = CustomImageProcessor.from_pretrained(A__ )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(A__ )
a__ : Tuple = AutoImageProcessor.from_pretrained(A__ )
self.assertIsInstance(A__ , A__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def __lowerCAmelCase ( self : List[Any] ) -> List[str]:
'''simple docstring'''
        class NewImageProcessor(CustomImageProcessor):
            is_local = True
try:
AutoConfig.register('''custom''' , A__ )
AutoImageProcessor.register(A__ , A__ )
# If remote code is not set, the default is to use local
a__ : Dict = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)

            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 688 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}


class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]

        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
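# Sketch of the fairseq/spm id alignment documented in __init__ above: spm keeps
# <unk> at id 0 and its first real piece at id 3, while fairseq reserves ids 0-3
# for <s>/<pad>/</s>/<unk>, so every spm id shifts up by fairseq_offset == 1.
def spm_id_to_fairseq_id(spm_id: int) -> int:
    fairseq_offset = 1
    fairseq_unk_id = 3
    # spm id 0 is <unk>; everything else shifts up by the offset
    return spm_id + fairseq_offset if spm_id else fairseq_unk_id


def _demo_alignment() -> None:
    assert spm_id_to_fairseq_id(3) == 4  # spm "," (id 3) -> fairseq "," (id 4)
    assert spm_id_to_fairseq_id(0) == 3  # unknown pieces map to fairseq <unk>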
| 129 |
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
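# Sketch (assumed behavior): the dummy class above lets
# `from transformers import TFGPT2Tokenizer` succeed even without keras_nlp
# installed; the ImportError telling the user to `pip install keras_nlp` is
# deferred to the first instantiation.
def _demo_dummy_object() -> None:
    try:
        TFGPT2Tokenizer()
    except ImportError as err:
        print(err)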
| 129 | 1 |
MOD_ADLER = 65521


def adler32(plain_text: str) -> int:
    """Compute the Adler-32 checksum of a string, character by character."""
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
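# Sketch: cross-check against the reference implementation in the standard
# library. zlib.adler32 takes bytes while adler32 above takes str, so encode;
# the two agree whenever each character's code point equals its byte value.
def _check_against_zlib() -> None:
    import zlib

    assert adler32("Wikipedia") == zlib.adler32(b"Wikipedia")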
| 380 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
'''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXJapaneseForCausalLM''',
'''GPTNeoXJapaneseLayer''',
'''GPTNeoXJapaneseModel''',
'''GPTNeoXJapanesePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
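# Sketch of the lazy-import idea behind _LazyModule (simplified; the real
# transformers class also handles __dir__, pickling and module specs):
# attribute access triggers the import of the owning submodule and caches it.
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name: str, attr_to_module: dict):
        super().__init__(name)
        # e.g. {"GPTNeoXJapaneseConfig": "transformers.models.gpt_neox_japanese.configuration_gpt_neox_japanese"}
        self._attr_to_module = attr_to_module

    def __getattr__(self, attr: str):
        module = importlib.import_module(self._attr_to_module[attr])
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value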
| 380 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wavlm"] = [
"WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"WavLMForAudioFrameClassification",
"WavLMForCTC",
"WavLMForSequenceClassification",
"WavLMForXVector",
"WavLMModel",
"WavLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 245 |
'''simple docstring'''
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x):
    """Return x unchanged if it is already iterable, else duplicate it into a pair."""
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class _SCREAMING_SNAKE_CASE :
def snake_case__ ( self : List[Any] , a__ : Optional[int] , a__ : List[str] ):
pass
def snake_case__ ( self : Dict ):
pass
def snake_case__ ( self : Optional[int] ):
pass
def snake_case__ ( self : List[Any] , a__ : List[Any] , a__ : List[Any] , a__ : int , a__ : Dict , a__ : str=None , **a__ : Dict ):
__magic_name__ = VisionTextDualEncoderConfig.from_vision_text_configs(a__ , a__ )
__magic_name__ = TFVisionTextDualEncoderModel(a__ )
__magic_name__ = model(input_ids=a__ , pixel_values=a__ , attention_mask=a__ )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim) )
def snake_case__ ( self : Any , a__ : Tuple , a__ : List[Any] , a__ : Union[str, Any] , a__ : Any , a__ : Dict=None , **a__ : Optional[Any] ):
__magic_name__ , __magic_name__ = self.get_vision_text_model(a__ , a__ )
__magic_name__ = TFVisionTextDualEncoderModel(vision_model=a__ , text_model=a__ )
__magic_name__ = model(input_ids=a__ , pixel_values=a__ , attention_mask=a__ )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) )
def snake_case__ ( self : str , a__ : Tuple , a__ : Optional[Any] , a__ : int , a__ : Optional[int] , a__ : int=None , **a__ : int ):
__magic_name__ , __magic_name__ = self.get_vision_text_model(a__ , a__ )
__magic_name__ = {'''vision_model''': vision_model, '''text_model''': text_model}
__magic_name__ = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**a__ )
__magic_name__ = model(input_ids=a__ , pixel_values=a__ , attention_mask=a__ )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) )
def snake_case__ ( self : str , a__ : List[Any] , a__ : Optional[int] , a__ : int , a__ : Union[str, Any] , a__ : Tuple=None , **a__ : Union[str, Any] ):
__magic_name__ , __magic_name__ = self.get_vision_text_model(a__ , a__ )
__magic_name__ = TFVisionTextDualEncoderModel(vision_model=a__ , text_model=a__ )
__magic_name__ = model(input_ids=a__ , pixel_values=a__ , attention_mask=a__ )
__magic_name__ = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(a__ )
__magic_name__ = TFVisionTextDualEncoderModel.from_pretrained(a__ )
__magic_name__ = model(input_ids=a__ , pixel_values=a__ , attention_mask=a__ )
__magic_name__ = after_output[0].numpy()
__magic_name__ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(a__ , 1E-5 )
def snake_case__ ( self : str , a__ : Any , a__ : Optional[int] , a__ : List[str] , a__ : Tuple , a__ : Optional[Any]=None , **a__ : int ):
__magic_name__ , __magic_name__ = self.get_vision_text_model(a__ , a__ )
__magic_name__ = TFVisionTextDualEncoderModel(vision_model=a__ , text_model=a__ )
__magic_name__ = model(
input_ids=a__ , pixel_values=a__ , attention_mask=a__ , output_attentions=a__ )
__magic_name__ = output.vision_model_output.attentions
self.assertEqual(len(a__ ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__magic_name__ = to_atuple(vision_model.config.image_size )
__magic_name__ = to_atuple(vision_model.config.patch_size )
__magic_name__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__magic_name__ = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__magic_name__ = output.text_model_output.attentions
self.assertEqual(len(a__ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
def snake_case__ ( self : Tuple ):
__magic_name__ = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**a__ )
def snake_case__ ( self : Optional[Any] ):
__magic_name__ = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**a__ )
def snake_case__ ( self : int ):
__magic_name__ = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**a__ )
def snake_case__ ( self : Optional[Any] ):
__magic_name__ = self.prepare_config_and_inputs()
self.check_save_load(**a__ )
def snake_case__ ( self : Tuple ):
__magic_name__ = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**a__ )
@slow
def snake_case__ ( self : int ):
__magic_name__ , __magic_name__ = self.get_pretrained_model_and_inputs()
__magic_name__ = model_a(**a__ )
__magic_name__ = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(a__ )
__magic_name__ = TFVisionTextDualEncoderModel.from_pretrained(a__ )
__magic_name__ = model_a(**a__ )
__magic_name__ = after_outputs[0].numpy()
__magic_name__ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(a__ , 1E-5 )
@require_tf
class _SCREAMING_SNAKE_CASE ( __a ,unittest.TestCase ):
def snake_case__ ( self : int ):
__magic_name__ = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-random-bert''' )
__magic_name__ = 13
__magic_name__ = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__magic_name__ = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__magic_name__ = random_attention_mask([batch_size, 4] )
__magic_name__ = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def snake_case__ ( self : Union[str, Any] , a__ : Any , a__ : List[Any] ):
__magic_name__ = TFViTModel(a__ , name='''vision_model''' )
__magic_name__ = TFBertModel(a__ , name='''text_model''' )
return vision_model, text_model
def snake_case__ ( self : List[str] ):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class _SCREAMING_SNAKE_CASE ( __a ,unittest.TestCase ):
def snake_case__ ( self : Tuple ):
# DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
# just reinitialize it.
__magic_name__ = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'''Rocketknight1/tiny-random-deit-tf''' , '''hf-internal-testing/tiny-random-roberta''' )
__magic_name__ = 13
__magic_name__ = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__magic_name__ = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__magic_name__ = random_attention_mask([batch_size, 4] )
__magic_name__ = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def snake_case__ ( self : Dict , a__ : Any , a__ : Tuple , a__ : str , a__ : Any , a__ : Union[str, Any]=None , **a__ : List[str] ):
__magic_name__ , __magic_name__ = self.get_vision_text_model(a__ , a__ )
__magic_name__ = TFVisionTextDualEncoderModel(vision_model=a__ , text_model=a__ )
__magic_name__ = model(
input_ids=a__ , pixel_values=a__ , attention_mask=a__ , output_attentions=a__ )
__magic_name__ = output.vision_model_output.attentions
self.assertEqual(len(a__ ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__magic_name__ = to_atuple(vision_model.config.image_size )
__magic_name__ = to_atuple(vision_model.config.patch_size )
__magic_name__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__magic_name__ = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__magic_name__ = output.text_model_output.attentions
self.assertEqual(len(a__ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def snake_case__ ( self : str , a__ : int , a__ : Any ):
__magic_name__ = TFDeiTModel(a__ , name='''vision_model''' )
__magic_name__ = TFRobertaModel(a__ , name='''text_model''' )
return vision_model, text_model
def snake_case__ ( self : Dict ):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class _SCREAMING_SNAKE_CASE ( __a ,unittest.TestCase ):
def snake_case__ ( self : List[str] ):
__magic_name__ = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'''Rocketknight1/tiny-random-clip-tf''' , '''hf-internal-testing/tiny-random-bert''' )
__magic_name__ = 13
__magic_name__ = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__magic_name__ = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__magic_name__ = random_attention_mask([batch_size, 4] )
__magic_name__ = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def snake_case__ ( self : int , a__ : int , a__ : Dict ):
__magic_name__ = TFCLIPVisionModel(a__ , name='''vision_model''' )
__magic_name__ = TFBertModel(a__ , name='''text_model''' )
return vision_model, text_model
def snake_case__ ( self : str ):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def snake_case__ ( self : Union[str, Any] ):
__magic_name__ = TFVisionTextDualEncoderModel.from_pretrained(
'''clip-italian/clip-italian''' , logit_scale_init_value=1.0 , from_pt=a__ )
__magic_name__ = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' )
__magic_name__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
__magic_name__ = processor(
text=['''una foto di un gatto''', '''una foto di un cane'''] , images=a__ , padding=a__ , return_tensors='''np''' )
__magic_name__ = model(**a__ )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
__magic_name__ = np.array([[1.2_284_727, 0.3_104_122]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , a__ , atol=1E-3 ) )
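# Sketch: the ViT/DeiT sequence-length arithmetic used in the attention-shape
# checks above (illustrative sizes).
def _demo_seq_len() -> None:
    image_size, patch_size = (224, 224), (16, 16)
    num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
    assert num_patches == 196
    assert num_patches + 1 == 197  # ViT: patches + [CLS]
    assert num_patches + 2 == 198  # DeiT: patches + [CLS] + distillation token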
| 245 | 1 |
from timeit import timeit

test_data = {
    "MALAYALAM": True,
    "String": False,
    "rotor": True,
    "level": True,
    "A": True,
    "BB": True,
    "ABC": False,
    "amanaplanacanalpanama": True,  # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())


def is_palindrome(s: str) -> bool:
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome_traversal(s: str) -> bool:
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))


def is_palindrome_recursive(s: str) -> bool:
    if len(s) <= 2:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False


def is_palindrome_slice(s: str) -> bool:
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")


if __name__ == "__main__":
    for key, value in test_data.items():
        assert is_palindrome(key) is is_palindrome_recursive(key)
        assert is_palindrome(key) is is_palindrome_slice(key)
        print(f"{key:21} {value}")
    print("a man a plan a canal panama")

    # finished 500,000 runs in 0.46793 seconds
    benchmark_function("is_palindrome_slice")
    # finished 500,000 runs in 0.85234 seconds
    benchmark_function("is_palindrome")
    # finished 500,000 runs in 1.32028 seconds
    benchmark_function("is_palindrome_recursive")
    # finished 500,000 runs in 2.08679 seconds
    benchmark_function("is_palindrome_traversal")
| 39 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = "rwkv"
SCREAMING_SNAKE_CASE : Any = {"max_position_embeddings": "context_length"}
    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
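# Usage sketch (illustrative; follows the standard PretrainedConfig workflow):
#
#     config = RwkvConfig(vocab_size=50277, context_length=1024)
#     config.save_pretrained("./rwkv-config")            # writes config.json
#     config = RwkvConfig.from_pretrained("./rwkv-config")
#
# The `attribute_map` above additionally exposes `max_position_embeddings`
# as an alias for `context_length`.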
'''simple docstring'''
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
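# Usage sketch (assumes at least one of the optional packages -- optuna,
# ray[tune], sigopt or wandb -- is installed; all names come from this module):
if __name__ == "__main__":
    backend_name = default_hp_search_backend()  # e.g. "optuna"
    backend = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend(backend_name)]()
    backend.ensure_available()  # raises RuntimeError with a pip hint if missing
    print(f"Default hyperparameter search backend: {backend_name}")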
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")
        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }

        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_bigbird_pegasus''': [
'''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BigBirdPegasusConfig''',
'''BigBirdPegasusOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
'''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BigBirdPegasusForCausalLM''',
'''BigBirdPegasusForConditionalGeneration''',
'''BigBirdPegasusForQuestionAnswering''',
'''BigBirdPegasusForSequenceClassification''',
'''BigBirdPegasusModel''',
'''BigBirdPegasusPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
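# Note on the pattern above (illustrative): replacing the module object in
# sys.modules with a _LazyModule defers the heavy torch imports until first
# attribute access, so
#     from transformers.models.bigbird_pegasus import BigBirdPegasusConfig
# only triggers the real submodule import at that line.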
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/led-base-16384''': 16384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
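# Illustrative trace of the `_pad` override above (hypothetical values): with
# padding_side == "right", a global_attention_mask of [0, 0, 1] padded to
# length 5 becomes [0, 0, 1, -1, -1] -- 0 marks local attention, 1 marks
# global attention, and -1 marks padding positions.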
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class SpeechTaTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechTaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechTaTokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = '<pad>'
A__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-4] , 'œ' )
self.assertEqual(vocab_keys[-2] , '<mask>' )
self.assertEqual(vocab_keys[-1] , '<ctc_blank>' )
self.assertEqual(len(__a ) , 81 )
def _UpperCAmelCase ( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = self.get_tokenizers(do_lower_case=__a )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
A__ = tokenizer.vocab_size
A__ = len(__a )
self.assertNotEqual(__a , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
A__ = ['aaaaa bbbbbb', 'cccccccccdddddddd']
A__ = tokenizer.add_tokens(__a )
A__ = tokenizer.vocab_size
A__ = len(__a )
self.assertNotEqual(__a , 0 )
self.assertEqual(__a , __a )
self.assertEqual(__a , len(__a ) )
self.assertEqual(__a , all_size + len(__a ) )
A__ = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=__a )
self.assertGreaterEqual(len(__a ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
A__ = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
A__ = tokenizer.add_special_tokens(__a )
A__ = tokenizer.vocab_size
A__ = len(__a )
self.assertNotEqual(__a , 0 )
self.assertEqual(__a , __a )
self.assertEqual(__a , len(__a ) )
self.assertEqual(__a , all_size_a + len(__a ) )
A__ = tokenizer.encode(
'>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=__a )
self.assertGreaterEqual(len(__a ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def _UpperCAmelCase ( self ):
"""simple docstring"""
pass
def _UpperCAmelCase ( self ):
"""simple docstring"""
pass
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = tokenizer.tokenize('This is a test' )
# fmt: off
self.assertListEqual(__a , [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
A__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
__a , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] )
A__ = tokenizer.convert_tokens_to_ids(__a )
# fmt: off
self.assertListEqual(__a , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
A__ = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] )
@slow
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = [
'Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '
'general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '
'Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '
'models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.',
'BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '
'conditioning on both left and right context in all layers.',
'The quick brown fox jumps over the lazy dog.',
]
# fmt: off
A__ = {
'input_ids': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name='microsoft/speecht5_asr' , revision='c5ef64c71905caeccde0e4462ef3f9077224c524' , sequences=__a , )
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__a , 'do_resize_and_center_crop' ) )
self.assertTrue(hasattr(__a , 'size' ) )
self.assertTrue(hasattr(__a , 'crop_pct' ) )
self.assertTrue(hasattr(__a , 'do_normalize' ) )
self.assertTrue(hasattr(__a , 'image_mean' ) )
self.assertTrue(hasattr(__a , 'image_std' ) )
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 30} )
self.assertEqual(image_processor.crop_size , {'height': 30, 'width': 30} )
A__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def _UpperCAmelCase ( self ):
"""simple docstring"""
pass
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a )
for image in image_inputs:
self.assertIsInstance(__a , Image.Image )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
A__ = image_processing(__a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , numpify=__a )
for image in image_inputs:
self.assertIsInstance(__a , np.ndarray )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
A__ = image_processing(__a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , torchify=__a )
for image in image_inputs:
self.assertIsInstance(__a , torch.Tensor )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
A__ = image_processing(__a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
import qiskit
def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    """
    Build and simulate an entangling circuit on `qubits` qubits.
    """
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)

    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)

    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))

    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.

    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    return job.result().get_counts(circuit)
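# Expected behaviour (illustrative): the H gate plus chained CNOTs prepare a
# GHZ state, so all qubits collapse together and only the all-zeros and
# all-ones bitstrings occur, e.g. for qubits=3 roughly {'000': 505, '111': 495}
# out of 1000 shots; exact counts vary per run.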
if __name__ == "__main__":
print(f'''Total count for various states are: {quantum_entanglement(3)}''')
deps = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            # Greedily look for the longest substring starting at `start`
            # that is present in the vocabulary.
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens
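# Illustrative trace of the greedy longest-match-first loop above (toy vocab,
# not the real CPM-Ant one): with vocab {"ab", "abc", "c"}, tokenize("abc")
# tries "abc" first and returns ["abc"]; with vocab {"ab", "c"} it returns
# ["ab", "c"]; at a position where no prefix matches, the unk token is
# emitted and the scan advances a single character.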
class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)
    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: List[int] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''snap-research/efficientformer-l1-300''': (
'''https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'''
),
}
class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def selection_sort(collection):
    """Sort a mutable sequence in place and return it."""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(selection_sort(unsorted))
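# Example pass-by-pass trace (illustrative): selection_sort([64, 25, 12, 22, 11])
# swaps the minimum of the unsorted suffix into place on each pass:
#     [11, 25, 12, 22, 64] -> [11, 12, 25, 22, 64] -> [11, 12, 22, 25, 64]
# and returns [11, 12, 22, 25, 64].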
from __future__ import annotations
_lowercase = "Muhammad Umer Farooq"
_lowercase = "MIT"
_lowercase = "1.0.0"
_lowercase = "Muhammad Umer Farooq"
_lowercase = "[email protected]"
_lowercase = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)
def get_domain_name(url: str) -> str:
    """Return the registered domain, keeping only the last two labels."""
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


def get_sub_domain_name(url: str) -> str:
    """Return the full network location of the URL."""
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(domain)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)
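# Illustrative behaviour of the URL helpers above:
#     get_sub_domain_name("https://docs.github.com/en")  -> "docs.github.com"
#     get_domain_name("https://docs.github.com/en")      -> "github.com"
# (only the last two dot-separated labels are kept for the email regex).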
if __name__ == "__main__":
_lowercase = emails_from_url("https://github.com")
print(F"{len(emails)} emails found:")
print("\n".join(sorted(emails)))
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
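# Illustrative: for a default (non multiple-choice) task, the `inputs` property
# above yields OrderedDict([("input_ids", {0: "batch", 1: "sequence"}), ...]),
# so an ONNX export marks batch size and sequence length as dynamic axes.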
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class OnnxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionInpaintPipelineLegacy(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionUpscalePipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class StableDiffusionOnnxPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )

        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")

        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )
    @require_tf
    @unittest.skip("Object detection not implemented in TF")
    def test_small_model_tf(self):
        pass
@require_torch
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = '''hf-internal-testing/tiny-detr-mobilenetsv3'''
__lowercase = AutoModelForObjectDetection.from_pretrained(_lowerCamelCase )
__lowercase = AutoFeatureExtractor.from_pretrained(_lowerCamelCase )
__lowercase = ObjectDetectionPipeline(model=_lowerCamelCase ,feature_extractor=_lowerCamelCase )
__lowercase = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' ,threshold=0.0 )
self.assertEqual(
nested_simplify(_lowerCamelCase ,decimals=4 ) ,[
{'''score''': 0.3_3_7_6, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
{'''score''': 0.3_3_7_6, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
] ,)
__lowercase = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] ,threshold=0.0 ,)
self.assertEqual(
nested_simplify(_lowerCamelCase ,decimals=4 ) ,[
[
{'''score''': 0.3_3_7_6, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
{'''score''': 0.3_3_7_6, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
],
[
{'''score''': 0.3_3_7_6, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
{'''score''': 0.3_3_7_6, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
],
] ,)
@require_torch
@slow
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
__lowercase = '''facebook/detr-resnet-50'''
__lowercase = AutoModelForObjectDetection.from_pretrained(_lowerCamelCase )
__lowercase = AutoFeatureExtractor.from_pretrained(_lowerCamelCase )
__lowercase = ObjectDetectionPipeline(model=_lowerCamelCase ,feature_extractor=_lowerCamelCase )
__lowercase = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
self.assertEqual(
nested_simplify(_lowerCamelCase ,decimals=4 ) ,[
{'''score''': 0.9_9_8_2, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_9_6_0, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_9_5_5, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
] ,)
__lowercase = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] )
self.assertEqual(
nested_simplify(_lowerCamelCase ,decimals=4 ) ,[
[
{'''score''': 0.9_9_8_2, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_9_6_0, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_9_5_5, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
[
{'''score''': 0.9_9_8_2, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_9_6_0, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_9_5_5, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
] ,)
@require_torch
@slow
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = '''facebook/detr-resnet-50'''
__lowercase = pipeline('''object-detection''' ,model=_lowerCamelCase )
__lowercase = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
self.assertEqual(
nested_simplify(_lowerCamelCase ,decimals=4 ) ,[
{'''score''': 0.9_9_8_2, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_9_6_0, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_9_5_5, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
] ,)
__lowercase = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] )
self.assertEqual(
nested_simplify(_lowerCamelCase ,decimals=4 ) ,[
[
{'''score''': 0.9_9_8_2, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_9_6_0, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_9_5_5, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
[
{'''score''': 0.9_9_8_2, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_9_6_0, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_9_5_5, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
] ,)
@require_torch
@slow
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = 0.9_9_8_5
__lowercase = '''facebook/detr-resnet-50'''
__lowercase = pipeline('''object-detection''' ,model=_lowerCamelCase )
__lowercase = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' ,threshold=_lowerCamelCase )
self.assertEqual(
nested_simplify(_lowerCamelCase ,decimals=4 ) ,[
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
] ,)
@require_torch
@require_pytesseract
@slow
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = '''Narsil/layoutlmv3-finetuned-funsd'''
__lowercase = 0.9_9_9_3
__lowercase = pipeline('''object-detection''' ,model=_lowerCamelCase ,threshold=_lowerCamelCase )
__lowercase = object_detector(
'''https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png''' )
self.assertEqual(
nested_simplify(_lowerCamelCase ,decimals=4 ) ,[
{'''score''': 0.9_9_9_3, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 294, '''ymin''': 254, '''xmax''': 343, '''ymax''': 264}},
{'''score''': 0.9_9_9_3, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 294, '''ymin''': 254, '''xmax''': 343, '''ymax''': 264}},
] ,)
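# A minimal usage sketch of the pipeline exercised above (the checkpoint id and
# image URL both appear in the tests; the 0.9 threshold is an illustrative choice):
#
#     from transformers import pipeline
#
#     detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#     outputs = detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)
#     # each entry: {"score": float, "label": str,
#     #              "box": {"xmin": int, "ymin": int, "xmax": int, "ymax": int}}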
| 502 | 0 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
__A : Union[str, Any] = [
"good first issue",
"good second issue",
"good difficult issue",
"enhancement",
"new pipeline/model",
"new scheduler",
"wip",
]
def main():
    g = Github(os.environ['GITHUB_TOKEN'] )
    repo = g.get_repo('huggingface/diffusers' )
    open_issues = repo.get_issues(state='open' )
    for issue in open_issues:
        comments = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open' )
issue.remove_from_labels('stale' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
issue.add_to_labels('stale' )
if __name__ == "__main__":
main()
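# Usage sketch (the script filename is an assumption; GITHUB_TOKEN is the
# environment variable read above and must hold a token allowed to edit issues):
#
#     GITHUB_TOKEN=<token> python close_stale_issues.py
#
# Intended to run on a schedule, e.g. a daily GitHub Actions cron job.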
| 705 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = 'https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg'
    image = Image.open(requests.get(url , stream=True ).raw ).convert('RGB' )
    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.embeddings.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.embeddings.layernorm.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old )
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
        v_bias = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias , requires_grad=False ), v_bias) )
        state_dict[f'''visual_encoder.blocks.{i}.attn.qkv.bias'''] = qkv_bias
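# (Why the zero block: the original vision checkpoint stores separate q and v
# biases and no k bias, while the HF attention expects one fused qkv bias, so a
# zero tensor is spliced in for the k slot.)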
def get_blipa_config(model_name):
    image_size = 364 if 'coco' in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained('decapoda-research/llama-7b-hf' , vocab_size=32001 ).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained('decapoda-research/llama-13b-hf' , vocab_size=32001 ).to_dict()
    else:
        raise ValueError('Model name not supported' )
    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30523 ).to_dict()
    config = InstructBlipConfig(vision_config=vision_config , text_config=text_config , qformer_config=qformer_config )
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    qformer_tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased' , truncation_side='left' )
qformer_tokenizer.add_special_tokens({'bos_token': '[DEC]'} )
if "t5" in model_name:
a__ = TaTokenizerFast.from_pretrained('google/flan-t5-xl' , truncation_side='left' )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
a__ = LlamaTokenizerFast.from_pretrained(
'huggyllama/llama-7b' , truncation_side='left' , bos_token='</s>' , unk_token='</s>' )
tokenizer.add_special_tokens({'pad_token': '[PAD]'} )
a__ , a__ = get_blipa_config(a )
a__ = InstructBlipForConditionalGeneration(a ).eval()
a__ = {
'instructblip-vicuna-7b': ('blip2_vicuna_instruct', 'vicuna7b'),
'instructblip-vicuna-13b': ('blip2_vicuna_instruct', 'vicuna13b'),
'instructblip-flan-t5-xl': ('blip2_t5_instruct', 'flant5xl'),
'instructblip-flan-t5-xxl': ('blip2_t5_instruct', 'flant5xxl'),
}
a__ , a__ = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
a__ = 'cuda:1' if torch.cuda.is_available() else 'cpu'
a__ = 'cuda:2' if torch.cuda.is_available() else 'cpu'
a__ , a__ , a__ = load_model_and_preprocess(
name=a , model_type=a , is_eval=a , device=a )
original_model.eval()
print('Done!' )
# update state dict keys
a__ = original_model.state_dict()
a__ = create_rename_keys(a )
for src, dest in rename_keys:
rename_key(a , a , a )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
a__ = state_dict.pop(a )
if key.startswith('Qformer.bert' ):
a__ = key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
a__ = key.replace('self' , 'attention' )
if "llm_proj" in key:
a__ = key.replace('llm_proj' , 'language_projection' )
if "t5_proj" in key:
a__ = key.replace('t5_proj' , 'language_projection' )
if key.startswith('llm_model' ):
a__ = key.replace('llm_model' , 'language_model' )
if key.startswith('t5' ):
a__ = key.replace('t5' , 'language' )
a__ = val
# read in qv biases
read_in_q_v_bias(a , a )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(a , strict=a )
a__ = load_demo_image()
a__ = 'What is unusual about this image?'
# create processor
a__ = BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=a , image_std=a )
a__ = InstructBlipProcessor(
image_processor=a , tokenizer=a , qformer_tokenizer=a , )
a__ = processor(images=a , text=a , return_tensors='pt' ).to(a )
# make sure processor creates exact same pixel values
a__ = vis_processors['eval'](a ).unsqueeze(0 ).to(a )
a__ = inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device ) , a )
original_model.to(a )
hf_model.to(a )
with torch.no_grad():
if "vicuna" in model_name:
a__ = original_model({'image': original_pixel_values, 'text_input': [prompt]} ).logits
a__ = hf_model(**a ).logits
else:
a__ = original_model(
{'image': original_pixel_values, 'text_input': [prompt], 'text_output': ['\n']} ).logits
a__ = tokenizer('\n' , return_tensors='pt' ).input_ids.to(a )
a__ = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 )
a__ = hf_model(**a , labels=a ).logits
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
a__ = 1e-4 if 'vicuna' in model_name else 1e-5
assert torch.allclose(original_logits.to(logits.device ) , a , atol=a )
print('Looks ok!' )
print('Generating with original model...' )
a__ = original_model.generate({'image': original_pixel_values, 'prompt': prompt} , num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print('Generating with HF model...' )
a__ = hf_model.generate(
**a , do_sample=a , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
a__ = 2
print('Original generation:' , a )
a__ = processor.batch_decode(a , skip_special_tokens=a )
a__ = [text.strip() for text in output_text]
print('HF generation:' , a )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(a )
hf_model.save_pretrained(a )
if push_to_hub:
processor.push_to_hub(f'''Salesforce/{model_name}''' )
hf_model.push_to_hub(f'''Salesforce/{model_name}''' )
if __name__ == "__main__":
__A : List[str] = argparse.ArgumentParser()
__A : Tuple = [
'instructblip-vicuna-7b',
'instructblip-vicuna-13b',
'instructblip-flan-t5-xl',
'instructblip-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='instructblip-flan-t5-xl',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
__A : Union[str, Any] = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
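# CLI usage sketch (the script filename is an assumption; the flags are the
# ones defined by the argparse block above):
#
#     python convert_instructblip_original_to_pytorch.py \
#         --model_name instructblip-flan-t5-xl \
#         --pytorch_dump_folder_path ./instructblip-flan-t5-xl \
#         --push_to_hub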
| 126 | 0 |
import operator


def strand_sort(arr: list, reverse: bool = False, solution: list = None) -> list:
    """Strand sort: repeatedly pull an ordered "strand" out of the input and
    merge it into the growing solution list."""
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)
    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
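# The comparison is generic, so the same routine sorts anything with a total
# order; a quick illustrative call:
#
#     strand_sort(["pear", "apple", "fig"])  # -> ['apple', 'fig', 'pear']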
| 184 |
'''simple docstring'''
def climb_stairs(number_of_steps: int) -> int:
    """Count the distinct ways to climb a staircase taking 1 or 2 steps at a time."""
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f'number_of_steps needs to be positive integer, your input {number_of_steps}'
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
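# Sanity check (the counts follow the Fibonacci sequence):
#
#     climb_stairs(3)  # -> 3  (1+1+1, 1+2, 2+1)
#     climb_stairs(4)  # -> 5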
| 356 | 0 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class __magic_name__ ( _UpperCamelCase ):
def __init__( self , __magic_name__ , __magic_name__=1_3 , __magic_name__=7 , __magic_name__=True , __magic_name__=True , __magic_name__=False , __magic_name__=True , __magic_name__=9_9 , __magic_name__=3_2 , __magic_name__=5 , __magic_name__=4 , __magic_name__=3_7 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=5_1_2 , __magic_name__=1_6 , __magic_name__=2 , __magic_name__=0.02 , __magic_name__=3 , __magic_name__=4 , __magic_name__=None , ):
"""simple docstring"""
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = seq_length
_lowerCAmelCase = is_training
_lowerCAmelCase = use_input_mask
_lowerCAmelCase = use_token_type_ids
_lowerCAmelCase = use_labels
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = type_vocab_size
_lowerCAmelCase = type_sequence_label_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = num_labels
_lowerCAmelCase = num_choices
_lowerCAmelCase = scope
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase = None
if self.use_input_mask:
_lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
if self.use_labels:
_lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self ):
"""simple docstring"""
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
"""simple docstring"""
_lowerCAmelCase = DistilBertModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
_lowerCAmelCase = model(__magic_name__ , __magic_name__ )
_lowerCAmelCase = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
"""simple docstring"""
_lowerCAmelCase = DistilBertForMaskedLM(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
_lowerCAmelCase = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
"""simple docstring"""
_lowerCAmelCase = DistilBertForQuestionAnswering(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
_lowerCAmelCase = model(
__magic_name__ , attention_mask=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
"""simple docstring"""
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = DistilBertForSequenceClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
_lowerCAmelCase = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
"""simple docstring"""
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = DistilBertForTokenClassification(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
_lowerCAmelCase = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
"""simple docstring"""
_lowerCAmelCase = self.num_choices
_lowerCAmelCase = DistilBertForMultipleChoice(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
_lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase = model(
__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCamelCase ( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class __magic_name__ ( _UpperCamelCase ,_UpperCamelCase ,unittest.TestCase ):
UpperCamelCase : str = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
UpperCamelCase : Dict = (
{
"feature-extraction": DistilBertModel,
"fill-mask": DistilBertForMaskedLM,
"question-answering": DistilBertForQuestionAnswering,
"text-classification": DistilBertForSequenceClassification,
"token-classification": DistilBertForTokenClassification,
"zero-shot": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase : Optional[int] = True
UpperCamelCase : List[str] = True
UpperCamelCase : Optional[int] = True
UpperCamelCase : List[Any] = True
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = DistilBertModelTester(self )
_lowerCAmelCase = ConfigTester(self , config_class=__magic_name__ , dim=3_7 )
def _lowerCamelCase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*__magic_name__ )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*__magic_name__ )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*__magic_name__ )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*__magic_name__ )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*__magic_name__ )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*__magic_name__ )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase = DistilBertModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
@slow
@require_torch_gpu
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
_lowerCAmelCase = True
_lowerCAmelCase = model_class(config=__magic_name__ )
_lowerCAmelCase = self._prepare_for_class(__magic_name__ , __magic_name__ )
_lowerCAmelCase = torch.jit.trace(
__magic_name__ , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(__magic_name__ , os.path.join(__magic_name__ , 'traced_model.pt' ) )
_lowerCAmelCase = torch.jit.load(os.path.join(__magic_name__ , 'traced_model.pt' ) , map_location=__magic_name__ )
loaded(inputs_dict['input_ids'].to(__magic_name__ ) , inputs_dict['attention_mask'].to(__magic_name__ ) )
@require_torch
class __magic_name__ ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = DistilBertModel.from_pretrained('distilbert-base-uncased' )
_lowerCAmelCase = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
_lowerCAmelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_lowerCAmelCase = model(__magic_name__ , attention_mask=__magic_name__ )[0]
_lowerCAmelCase = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , __magic_name__ )
_lowerCAmelCase = torch.tensor(
[[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __magic_name__ , atol=1e-4 ) )
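# The slice assertion above follows a common integration-test pattern: run a
# small fixed input through the pretrained checkpoint and compare a 3x3 corner
# of the hidden states against hard-coded reference values with a loose
# tolerance (atol=1e-4), so genuine regressions are caught while harmless
# numerical drift across torch versions is not.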
| 309 |
"""simple docstring"""
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = """
import os
"""
IMPORT_IN_FUNCTION = """
def foo():
import os
return False
"""
DEEPLY_NESTED_IMPORT = """
def foo():
def bar():
if True:
import os
return False
return bar()
"""
TOP_LEVEL_TRY_IMPORT = """
import os
try:
import bar
except ImportError:
raise ValueError()
"""
TRY_IMPORT_IN_FUNCTION = """
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
"""
MULTIPLE_EXCEPTS_IMPORT = """
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
"""
EXCEPT_AS_IMPORT = """
import os
try:
import bar
except ImportError as e:
raise ValueError()
"""
GENERIC_EXCEPT_IMPORT = """
import os
try:
import bar
except:
raise ValueError()
"""
MULTILINE_TRY_IMPORT = """
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
"""
MULTILINE_BOTH_IMPORT = """
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
"""
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('case', CASES )
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, 'test_file.py' )
    with open(tmp_file_path, 'w' ) as _tmp_file:
        _tmp_file.write(case )
    parsed_imports = get_imports(tmp_file_path )
assert parsed_imports == ["os"]
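# get_imports statically scans a Python source file and returns the top-level
# module names it imports (including imports guarded by try/except, which the
# cases above exercise); the dynamic-module machinery relies on it to work out
# which packages a remote custom-code module needs before loading it.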
| 309 | 1 |
MOD_ADLER = 65521


def adler32(plain_text: str) -> int:
    """Compute the Adler-32 checksum of a string (two running sums mod 65521)."""
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
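# Quick check against the published reference vector:
#
#     adler32("Wikipedia")  # -> 300286872 (0x11E60398)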
| 86 |
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
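# Context (Project Euler 58, spiral primes): for a new ring of side j + 2, the
# three non-square corners are j*j + j + 1, j*j + 2j + 2 and j*j + 3j + 3
# (spaced j + 1 apart), while the fourth corner (j + 2)**2 is a perfect square
# and never prime; 2*j - 1 counts all diagonal values seen so far. solution()
# grows the spiral until the fraction of primes on the diagonals drops below
# `ratio`.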
| 529 | 0 |
'''simple docstring'''
def reverse_long_words(sentence: str) -> str:
    """Reverse every word longer than 4 characters; shorter words pass through."""
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('Hey wollef sroirraw'))
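# Expected output: "Hey follow warriors" ("wollef" and "sroirraw" are longer
# than 4 characters and get reversed; "Hey" is left alone).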
| 716 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'configuration_beit': ['BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BeitConfig', 'BeitOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_beit'] = ['BeitFeatureExtractor']
    _import_structure['image_processing_beit'] = ['BeitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_beit'] = [
'BEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BeitForImageClassification',
'BeitForMaskedImageModeling',
'BeitForSemanticSegmentation',
'BeitModel',
'BeitPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_beit'] = [
'FlaxBeitForImageClassification',
'FlaxBeitForMaskedImageModeling',
'FlaxBeitModel',
'FlaxBeitPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
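# Note: this is the standard lazy-import pattern for these __init__ files.
# _import_structure maps each submodule to its public symbols; at runtime
# sys.modules[__name__] is replaced by a _LazyModule that only imports a
# submodule on first attribute access, keeping top-level imports cheap.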
| 377 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_nllb_moe''': [
'''NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''NllbMoeConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_nllb_moe'] = [
'''NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NllbMoeForConditionalGeneration''',
'''NllbMoeModel''',
'''NllbMoePreTrainedModel''',
'''NllbMoeTop2Router''',
'''NllbMoeSparseMLP''',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 252 |
def split(string: str, separator: str = " ") -> list:
    """Split `string` on `separator` without using str.split."""
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
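# Example:
#
#     split("apple#banana#cherry", "#")  # -> ['apple', 'banana', 'cherry']
#
# One behavioural quirk worth knowing: unlike str.split, a trailing separator
# does not produce a final empty string, because the elif branch never fires
# when the last character is the separator itself.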
| 252 | 1 |
'''simple docstring'''
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F"""If set, {key} must be yes or no.""")
    return _value
UpperCamelCase__ = parse_flag_from_env('RUN_SLOW', default=False)
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skip("Test was skipped" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(_run_slow_tests , "test is slow" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(not torch.cuda.is_available() , "test requires only a CPU" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(torch.cuda.is_available() , "test requires a GPU" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(is_xpu_available() , "test requires a XPU" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(is_mps_available() , "test requires a `mps` backend support in `torch`" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , "test requires the Hugging Face suite" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(is_bnb_available() , "test requires the bitsandbytes library" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(is_tpu_available() , "test requires TPU" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() == 1 , "test requires a GPU" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() == 1 , "test requires a XPU" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() > 1 , "test requires multiple GPUs" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() > 1 , "test requires multiple XPUs" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(is_safetensors_available() , "test requires safetensors" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(is_deepspeed_available() , "test requires DeepSpeed" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(is_torch_version(">=" , "1.12.0" ) , "test requires torch version >= 1.12.0" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase=None , _UpperCamelCase=None ):
"""simple docstring"""
if test_case is None:
return partial(_UpperCamelCase , version=_UpperCamelCase )
return unittest.skipUnless(is_torch_version(">=" , _UpperCamelCase ) , F"""test requires torch version >= {version}""" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(is_tensorboard_available() , "test requires Tensorboard" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(is_wandb_available() , "test requires wandb" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(is_comet_ml_available() , "test requires comet_ml" )(_UpperCamelCase )
UpperCamelCase__ = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(
_atleast_one_tracker_available , "test requires at least one tracker to be available and for `comet_ml` to not be installed" , )(_UpperCamelCase )
class _UpperCAmelCase ( unittest.TestCase ):
__lowerCamelCase: Union[str, Any] = True
@classmethod
def lowerCAmelCase__ ( cls : Union[str, Any] ):
'''simple docstring'''
lowercase_ : List[Any] = tempfile.mkdtemp()
@classmethod
def lowerCAmelCase__ ( cls : List[str] ):
'''simple docstring'''
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob("**/*" ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(a )
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCAmelCase__ ( self : List[Any] , a : Union[mock.Mock, List[mock.Mock]] ):
'''simple docstring'''
lowercase_ : int = mocks if isinstance(a , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Optional[Any] = AcceleratorState()
lowercase_ : List[Any] = tensor[None].clone().to(state.device )
lowercase_ : Any = gather(_UpperCamelCase ).cpu()
lowercase_ : Union[str, Any] = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , _UpperCamelCase ):
return False
return True
class _UpperCAmelCase :
def __init__( self : List[Any] , a : str , a : Any , a : List[Any] ):
'''simple docstring'''
lowercase_ : Dict = returncode
lowercase_ : str = stdout
lowercase_ : List[Any] = stderr
async def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
while True:
lowercase_ : Optional[int] = await stream.readline()
if line:
callback(_UpperCamelCase )
else:
break
async def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=False , _UpperCamelCase=False ):
"""simple docstring"""
if echo:
print("\nRunning: " , " ".join(_UpperCamelCase ) )
lowercase_ : int = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=_UpperCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_UpperCamelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
lowercase_ : Tuple = []
lowercase_ : Any = []
    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda _UpperCamelCase : tee(_UpperCamelCase , _UpperCamelCase , sys.stdout , label="stdout:" ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda _UpperCamelCase : tee(_UpperCamelCase , _UpperCamelCase , sys.stderr , label="stderr:" ) ) ),
] , timeout=_UpperCamelCase , )
return _RunOutput(await p.wait() , _UpperCamelCase , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=180 , _UpperCamelCase=False , _UpperCamelCase=True ):
"""simple docstring"""
lowercase_ : Dict = asyncio.get_event_loop()
lowercase_ : List[str] = loop.run_until_complete(
_stream_subprocess(_UpperCamelCase , env=_UpperCamelCase , stdin=_UpperCamelCase , timeout=_UpperCamelCase , quiet=_UpperCamelCase , echo=_UpperCamelCase ) )
lowercase_ : Optional[int] = " ".join(_UpperCamelCase )
if result.returncode > 0:
lowercase_ : str = "\n".join(result.stderr )
raise RuntimeError(
F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
F"""The combined stderr from workers follows:\n{stderr}""" )
return result
class _UpperCAmelCase ( snake_case ):
pass
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase=False ):
"""simple docstring"""
try:
lowercase_ : List[Any] = subprocess.check_output(_UpperCamelCase , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(_UpperCamelCase , "decode" ):
lowercase_ : List[str] = output.decode("utf-8" )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
F"""Command `{" ".join(_UpperCamelCase )}` failed with the following error:\n\n{e.output.decode()}""" ) from e
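# Note on the subprocess helpers above: the async runner deliberately reads
# stdout/stderr line by line through asyncio tasks rather than
# Process.communicate(), so worker output is echoed live and a hang stays
# debuggable (the inline comment block inside it says as much); the synchronous
# wrapper then turns a non-zero return code into a RuntimeError carrying the
# captured stderr.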
| 640 |
'''simple docstring'''
import heapq
def greedy_min_vertex_cover(graph: dict) -> set:
    """Greedy APX algorithm for the minimum vertex cover problem."""
    queue: list[list] = []
    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase__ = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
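# For the sample graph this prints {0, 1, 2, 4}: vertex 2 (degree 3) is picked
# first, then the greedy loop keeps taking the highest-degree remaining vertex
# until no edges are left.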
| 640 | 1 |